Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/e1000/e1000_main.c
Commit 05bee47377 by David S. Miller, 2009-01-30 14:31:07 -08:00
49 changed files with 515 additions and 427 deletions


@@ -2,14 +2,14 @@
 IP-Aliasing:
 ============
-IP-aliases are additional IP-addresses/masks hooked up to a base
-interface by adding a colon and a string when running ifconfig.
+IP-aliases are an obsolete way to manage multiple IP-addresses/masks
+per interface. Newer tools such as iproute2 support multiple
+address/prefixes per interface, but aliases are still supported
+for backwards compatibility.
+An alias is formed by adding a colon and a string when running ifconfig.
 This string is usually numeric, but this is not a must.
-IP-Aliases are avail if CONFIG_INET (`standard' IPv4 networking)
-is configured in the kernel.
 o Alias creation.
   Alias creation is done by 'magic' interface naming: eg. to create a
   200.1.1.1 alias for eth0 ...
@@ -38,16 +38,3 @@ o Relationship with main device
   If the base device is shut down the added aliases will be deleted
   too.
-Contact
--------
-Please finger or e-mail me:
-	Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
-Updated by Erik Schoenfelder <schoenfr@gaertner.DE>
-; local variables:
-; mode: indented-text
-; mode: auto-fill
-; end:
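For readers comparing the two styles the rewritten introduction above contrasts, a minimal sketch follows; the 200.1.1.1 address and eth0 interface reuse the document's own example, while the /24 prefix length and the exact ip(8) invocations are illustrative assumptions rather than part of the patch:

    # Legacy alias: one extra address per alias label
    ifconfig eth0:0 200.1.1.1

    # iproute2 equivalent: several addresses directly on the base interface
    ip address add 200.1.1.1/24 dev eth0
    ip address show dev eth0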


@@ -2836,8 +2836,6 @@ S: Maintained
 MAC80211
 P: Johannes Berg
 M: johannes@sipsolutions.net
-P: Michael Wu
-M: flamingice@sourmilk.net
 L: linux-wireless@vger.kernel.org
 W: http://linuxwireless.org/
 T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git


@@ -31,7 +31,7 @@
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k2-NAPI"
+#define DRV_VERSION "7.3.21-k3-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -3697,7 +3697,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
     struct e1000_hw *hw = &adapter->hw;
     u32 rctl, icr = er32(ICR);
-    if (unlikely(!icr))
+    if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
         return IRQ_NONE;  /* Not our interrupt */
     /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is


@@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
     if (NULL == new_bus)
         return -ENOMEM;
+    device_init_wakeup(&ofdev->dev, 1);
     new_bus->name = "Gianfar MII Bus",
     new_bus->read = &gfar_mdio_read,
     new_bus->write = &gfar_mdio_write,


@@ -210,7 +210,7 @@
 #define MAX_CMD_DESCRIPTORS_HOST	1024
 #define MAX_RCV_DESCRIPTORS_1G		2048
 #define MAX_RCV_DESCRIPTORS_10G		4096
-#define MAX_JUMBO_RCV_DESCRIPTORS	512
+#define MAX_JUMBO_RCV_DESCRIPTORS	1024
 #define MAX_LRO_RCV_DESCRIPTORS		8
 #define MAX_RCVSTATUS_DESCRIPTORS	MAX_RCV_DESCRIPTORS
 #define MAX_JUMBO_RCV_DESC		MAX_JUMBO_RCV_DESCRIPTORS


@@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
     }
     for (i = 0; i < n; i++) {
         if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
-        netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
+        netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+            kfree(buf);
             return -EIO;
+        }
         buf[i].addr = addr;
         buf[i].data = val;


@@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev)
 {
     struct r6040_private *lp = netdev_priv(dev);
     void __iomem *ioaddr = lp->base;
-    struct pci_dev *pdev = lp->pdev;
     int limit = 2048;
     u16 *adrp;
     u16 cmd;


@@ -675,9 +675,8 @@ static int efx_init_port(struct efx_nic *efx)
     rc = efx->phy_op->init(efx);
     if (rc)
         return rc;
-    efx->phy_op->reconfigure(efx);
     mutex_lock(&efx->mac_lock);
+    efx->phy_op->reconfigure(efx);
     rc = falcon_switch_mac(efx);
     mutex_unlock(&efx->mac_lock);
     if (rc)
@@ -685,7 +684,7 @@ static int efx_init_port(struct efx_nic *efx)
     efx->mac_op->reconfigure(efx);
     efx->port_initialized = true;
-    efx->stats_enabled = true;
+    efx_stats_enable(efx);
     return 0;
 fail:
@@ -734,6 +733,7 @@ static void efx_fini_port(struct efx_nic *efx)
     if (!efx->port_initialized)
         return;
+    efx_stats_disable(efx);
     efx->phy_op->fini(efx);
     efx->port_initialized = false;
@@ -1352,6 +1352,20 @@ static int efx_net_stop(struct net_device *net_dev)
     return 0;
 }
+void efx_stats_disable(struct efx_nic *efx)
+{
+    spin_lock(&efx->stats_lock);
+    ++efx->stats_disable_count;
+    spin_unlock(&efx->stats_lock);
+}
+void efx_stats_enable(struct efx_nic *efx)
+{
+    spin_lock(&efx->stats_lock);
+    --efx->stats_disable_count;
+    spin_unlock(&efx->stats_lock);
+}
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
@@ -1360,12 +1374,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
     struct net_device_stats *stats = &net_dev->stats;
     /* Update stats if possible, but do not wait if another thread
-     * is updating them (or resetting the NIC); slightly stale
-     * stats are acceptable.
+     * is updating them or if MAC stats fetches are temporarily
+     * disabled; slightly stale stats are acceptable.
      */
     if (!spin_trylock(&efx->stats_lock))
         return stats;
-    if (efx->stats_enabled) {
+    if (!efx->stats_disable_count) {
         efx->mac_op->update_stats(efx);
         falcon_update_nic_stats(efx);
     }
@@ -1613,16 +1627,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 /* Tears down the entire software state and most of the hardware state
  * before reset. */
-void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+void efx_reset_down(struct efx_nic *efx, enum reset_type method,
+                    struct ethtool_cmd *ecmd)
 {
     EFX_ASSERT_RESET_SERIALISED(efx);
-    /* The net_dev->get_stats handler is quite slow, and will fail
-     * if a fetch is pending over reset. Serialise against it. */
-    spin_lock(&efx->stats_lock);
-    efx->stats_enabled = false;
-    spin_unlock(&efx->stats_lock);
+    efx_stats_disable(efx);
     efx_stop_all(efx);
     mutex_lock(&efx->mac_lock);
     mutex_lock(&efx->spi_lock);
@@ -1630,6 +1640,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
     efx->phy_op->get_settings(efx, ecmd);
     efx_fini_channels(efx);
+    if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+        efx->phy_op->fini(efx);
 }
 /* This function will always ensure that the locks acquired in
@@ -1637,7 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
  * that we were unable to reinitialise the hardware, and the
  * driver should be disabled. If ok is false, then the rx and tx
  * engines are not restarted, pending a RESET_DISABLE. */
-int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
+int efx_reset_up(struct efx_nic *efx, enum reset_type method,
+                 struct ethtool_cmd *ecmd, bool ok)
 {
     int rc;
@@ -1649,6 +1662,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
         ok = false;
     }
+    if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+        if (ok) {
+            rc = efx->phy_op->init(efx);
+            if (rc)
+                ok = false;
+        } else
+            efx->port_initialized = false;
+    }
     if (ok) {
         efx_init_channels(efx);
@@ -1661,7 +1683,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
     if (ok) {
         efx_start_all(efx);
-        efx->stats_enabled = true;
+        efx_stats_enable(efx);
     }
     return rc;
 }
@@ -1693,7 +1715,7 @@ static int efx_reset(struct efx_nic *efx)
     EFX_INFO(efx, "resetting (%d)\n", method);
-    efx_reset_down(efx, &ecmd);
+    efx_reset_down(efx, method, &ecmd);
     rc = falcon_reset_hw(efx, method);
     if (rc) {
@@ -1712,10 +1734,10 @@ static int efx_reset(struct efx_nic *efx)
     /* Leave device stopped if necessary */
     if (method == RESET_TYPE_DISABLE) {
-        efx_reset_up(efx, &ecmd, false);
+        efx_reset_up(efx, method, &ecmd, false);
         rc = -EIO;
     } else {
-        rc = efx_reset_up(efx, &ecmd, true);
+        rc = efx_reset_up(efx, method, &ecmd, true);
     }
 out_disable:
@@ -1867,6 +1889,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
     efx->rx_checksum_enabled = true;
     spin_lock_init(&efx->netif_stop_lock);
     spin_lock_init(&efx->stats_lock);
+    efx->stats_disable_count = 1;
     mutex_init(&efx->mac_lock);
     efx->mac_op = &efx_dummy_mac_operations;
     efx->phy_op = &efx_dummy_phy_operations;


@@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel);
 extern void efx_flush_queues(struct efx_nic *efx);
 /* Ports */
+extern void efx_stats_disable(struct efx_nic *efx);
+extern void efx_stats_enable(struct efx_nic *efx);
 extern void efx_reconfigure_port(struct efx_nic *efx);
 extern void __efx_reconfigure_port(struct efx_nic *efx);
 /* Reset handling */
-extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
-extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
-                        bool ok);
+extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
+                           struct ethtool_cmd *ecmd);
+extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
+                        struct ethtool_cmd *ecmd, bool ok);
 /* Global */
 extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);


@@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
     struct efx_nic *efx = netdev_priv(net_dev);
     int rc;
-    if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
-        return -EINVAL;
     /* Falcon GMAC does not support 1000Mbps HD */
     if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
         EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"


@@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
               rx_ev_pause_frm ? " [PAUSE]" : "");
     }
 #endif
-    if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
-                 efx->phy_type == PHY_TYPE_SFX7101))
-        tenxpress_crc_err(efx);
 }
 /* Handle receive events that are not in-order. */
@@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
     /* MAC stats will fail whilst the TX fifo is draining. Serialise
      * the drain sequence with the statistics fetch */
-    spin_lock(&efx->stats_lock);
+    efx_stats_disable(efx);
     falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
     EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
@@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
         udelay(10);
     }
-    spin_unlock(&efx->stats_lock);
+    efx_stats_enable(efx);
     /* If we've reset the EM block and the link is up, then
      * we'll have to kick the XAUI link so the PHY can recover */
@@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx)
     struct efx_mac_operations *old_mac_op = efx->mac_op;
     efx_oword_t nic_stat;
     unsigned strap_val;
+    int rc = 0;
+    /* Don't try to fetch MAC stats while we're switching MACs */
+    efx_stats_disable(efx);
     /* Internal loopbacks override the phy speed setting */
     if (efx->loopback_mode == LOOPBACK_GMAC) {
@@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx)
         efx->link_fd = true;
     }
+    WARN_ON(!mutex_is_locked(&efx->mac_lock));
     efx->mac_op = (EFX_IS10G(efx) ?
                    &falcon_xmac_operations : &falcon_gmac_operations);
-    if (old_mac_op == efx->mac_op)
-        return 0;
-    WARN_ON(!mutex_is_locked(&efx->mac_lock));
-    /* Not all macs support a mac-level link state */
-    efx->mac_up = true;
+    /* Always push the NIC_STAT_REG setting even if the mac hasn't
+     * changed, because this function is run post online reset */
     falcon_read(efx, &nic_stat, NIC_STAT_REG);
     strap_val = EFX_IS10G(efx) ? 5 : 3;
     if (falcon_rev(efx) >= FALCON_REV_B0) {
@@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx)
         BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
     }
+    if (old_mac_op == efx->mac_op)
+        goto out;
     EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
-    return falcon_reset_macs(efx);
+    /* Not all macs support a mac-level link state */
+    efx->mac_up = true;
+    rc = falcon_reset_macs(efx);
+out:
+    efx_stats_enable(efx);
+    return rc;
 }
 /* This call is responsible for hooking in the MAC and PHY operations */


@@ -15,6 +15,7 @@
 #include "net_driver.h"
 #include "mdio_10g.h"
 #include "boards.h"
+#include "workarounds.h"
 int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
                             int spins, int spintime)
@@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
         return false;
     else if (efx_phy_mode_disabled(efx->phy_mode))
         return false;
-    else if (efx->loopback_mode == LOOPBACK_PHYXS) {
+    else if (efx->loopback_mode == LOOPBACK_PHYXS)
         mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
                       MDIO_MMDREG_DEVS_PCS |
                       MDIO_MMDREG_DEVS_PMAPMD |
                       MDIO_MMDREG_DEVS_AN);
-        if (!mmd_mask) {
-            reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
-                                     MDIO_PHYXS_STATUS2);
-            return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
-        }
-    } else if (efx->loopback_mode == LOOPBACK_PCS)
+    else if (efx->loopback_mode == LOOPBACK_PCS)
         mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
                       MDIO_MMDREG_DEVS_PMAPMD |
                       MDIO_MMDREG_DEVS_AN);
@@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
         mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
                       MDIO_MMDREG_DEVS_AN);
+    if (!mmd_mask) {
+        /* Use presence of XGMII faults in leui of link state */
+        reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
+                                 MDIO_PHYXS_STATUS2);
+        return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
+    }
     while (mmd_mask) {
         if (mmd_mask & 1) {
             /* Double reads because link state is latched, and a
@@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
     }
 }
-static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
+static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
 {
     int phy_id = efx->mii.phy_id;
     u32 result = 0;
@@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
         result |= ADVERTISED_100baseT_Half;
     if (reg & ADVERTISE_100FULL)
         result |= ADVERTISED_100baseT_Full;
-    if (reg & LPA_RESV)
-        result |= xnp;
     return result;
 }
@@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx,
  */
 void mdio_clause45_get_settings_ext(struct efx_nic *efx,
                                     struct ethtool_cmd *ecmd,
-                                    u32 xnp, u32 xnp_lpa)
+                                    u32 npage_adv, u32 npage_lpa)
 {
     int phy_id = efx->mii.phy_id;
     int reg;
@@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
             ecmd->autoneg = AUTONEG_ENABLE;
             ecmd->advertising |=
                 ADVERTISED_Autoneg |
-                mdio_clause45_get_an(efx,
-                                     MDIO_AN_ADVERTISE, xnp);
+                mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
+                npage_adv;
         } else
             ecmd->autoneg = AUTONEG_DISABLE;
     } else
@@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
     if (ecmd->autoneg) {
         /* If AN is complete, report best common mode,
          * otherwise report best advertised mode. */
-        u32 common = ecmd->advertising;
+        u32 modes = 0;
         if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
                                MDIO_MMDREG_STAT1) &
-            (1 << MDIO_AN_STATUS_AN_DONE_LBN)) {
-            common &= mdio_clause45_get_an(efx, MDIO_AN_LPA,
-                                           xnp_lpa);
-        }
-        if (common & ADVERTISED_10000baseT_Full) {
+            (1 << MDIO_AN_STATUS_AN_DONE_LBN))
+            modes = (ecmd->advertising &
+                     (mdio_clause45_get_an(efx, MDIO_AN_LPA) |
+                      npage_lpa));
+        if (modes == 0)
+            modes = ecmd->advertising;
+        if (modes & ADVERTISED_10000baseT_Full) {
             ecmd->speed = SPEED_10000;
             ecmd->duplex = DUPLEX_FULL;
-        } else if (common & (ADVERTISED_1000baseT_Full |
-                             ADVERTISED_1000baseT_Half)) {
+        } else if (modes & (ADVERTISED_1000baseT_Full |
+                            ADVERTISED_1000baseT_Half)) {
             ecmd->speed = SPEED_1000;
-            ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full);
-        } else if (common & (ADVERTISED_100baseT_Full |
-                             ADVERTISED_100baseT_Half)) {
+            ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
+        } else if (modes & (ADVERTISED_100baseT_Full |
+                            ADVERTISED_100baseT_Half)) {
             ecmd->speed = SPEED_100;
-            ecmd->duplex = !!(common & ADVERTISED_100baseT_Full);
+            ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
         } else {
             ecmd->speed = SPEED_10;
-            ecmd->duplex = !!(common & ADVERTISED_10baseT_Full);
+            ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
         }
     } else {
         /* Report forced settings */
@@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
     int phy_id = efx->mii.phy_id;
     struct ethtool_cmd prev;
     u32 required;
-    int ctrl1_bits, reg;
+    int reg;
     efx->phy_op->get_settings(efx, &prev);
@@ -430,73 +433,32 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
     if (prev.port != PORT_TP || ecmd->port != PORT_TP)
         return -EINVAL;
-    /* Check that PHY supports these settings and work out the
-     * basic control bits */
-    if (ecmd->duplex) {
+    /* Check that PHY supports these settings */
+    if (ecmd->autoneg) {
+        required = SUPPORTED_Autoneg;
+    } else if (ecmd->duplex) {
         switch (ecmd->speed) {
-        case SPEED_10:
-            ctrl1_bits = BMCR_FULLDPLX;
-            required = SUPPORTED_10baseT_Full;
-            break;
-        case SPEED_100:
-            ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
-            required = SUPPORTED_100baseT_Full;
-            break;
-        case SPEED_1000:
-            ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
-            required = SUPPORTED_1000baseT_Full;
-            break;
-        case SPEED_10000:
-            ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
-                          BMCR_FULLDPLX);
-            required = SUPPORTED_10000baseT_Full;
-            break;
-        default:
-            return -EINVAL;
+        case SPEED_10:  required = SUPPORTED_10baseT_Full;  break;
+        case SPEED_100: required = SUPPORTED_100baseT_Full; break;
+        default:        return -EINVAL;
         }
     } else {
         switch (ecmd->speed) {
-        case SPEED_10:
-            ctrl1_bits = 0;
-            required = SUPPORTED_10baseT_Half;
-            break;
-        case SPEED_100:
-            ctrl1_bits = BMCR_SPEED100;
-            required = SUPPORTED_100baseT_Half;
-            break;
-        case SPEED_1000:
-            ctrl1_bits = BMCR_SPEED1000;
-            required = SUPPORTED_1000baseT_Half;
-            break;
-        default:
-            return -EINVAL;
+        case SPEED_10:  required = SUPPORTED_10baseT_Half;  break;
+        case SPEED_100: required = SUPPORTED_100baseT_Half; break;
+        default:        return -EINVAL;
         }
     }
-    if (ecmd->autoneg)
-        required |= SUPPORTED_Autoneg;
     required |= ecmd->advertising;
     if (required & ~prev.supported)
         return -EINVAL;
-    /* Set the basic control bits */
-    reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
-                             MDIO_MMDREG_CTRL1);
-    reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c);
-    reg |= ctrl1_bits;
-    mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1,
-                        reg);
-    /* Set the AN registers */
-    if (ecmd->autoneg != prev.autoneg ||
-        ecmd->advertising != prev.advertising) {
-        bool xnp = false;
-        if (efx->phy_op->set_xnp_advertise)
-            xnp = efx->phy_op->set_xnp_advertise(efx,
-                                                 ecmd->advertising);
-        if (ecmd->autoneg) {
-            reg = 0;
+    if (ecmd->autoneg) {
+        bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
+                    || EFX_WORKAROUND_13204(efx));
+        /* Set up the base page */
+        reg = ADVERTISE_CSMA;
         if (ecmd->advertising & ADVERTISED_10baseT_Half)
             reg |= ADVERTISE_10HALF;
         if (ecmd->advertising & ADVERTISED_10baseT_Full)
@@ -507,22 +469,47 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
             reg |= ADVERTISE_100FULL;
         if (xnp)
             reg |= ADVERTISE_RESV;
+        else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
+                                      ADVERTISED_1000baseT_Full))
+            reg |= ADVERTISE_NPAGE;
+        reg |= efx_fc_advertise(efx->wanted_fc);
         mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
                             MDIO_AN_ADVERTISE, reg);
-        }
+        /* Set up the (extended) next page if necessary */
+        if (efx->phy_op->set_npage_adv)
+            efx->phy_op->set_npage_adv(efx, ecmd->advertising);
+        /* Enable and restart AN */
         reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
                                  MDIO_MMDREG_CTRL1);
-        if (ecmd->autoneg)
-            reg |= BMCR_ANENABLE | BMCR_ANRESTART;
-        else
-            reg &= ~BMCR_ANENABLE;
+        reg |= BMCR_ANENABLE;
+        if (!(EFX_WORKAROUND_15195(efx) &&
+              LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
+            reg |= BMCR_ANRESTART;
         if (xnp)
             reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
         else
            reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
        mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
                            MDIO_MMDREG_CTRL1, reg);
+    } else {
+        /* Disable AN */
+        mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
+                               MDIO_MMDREG_CTRL1,
+                               __ffs(BMCR_ANENABLE), false);
+        /* Set the basic control bits */
+        reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+                                 MDIO_MMDREG_CTRL1);
+        reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
+                 0x003c);
+        if (ecmd->speed == SPEED_100)
+            reg |= BMCR_SPEED100;
+        if (ecmd->duplex)
+            reg |= BMCR_FULLDPLX;
+        mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+                            MDIO_MMDREG_CTRL1, reg);
     }
     return 0;


@@ -155,7 +155,8 @@
 #define MDIO_AN_XNP			22
 #define MDIO_AN_LPA_XNP			25
-#define MDIO_AN_10GBT_ADVERTISE		32
+#define MDIO_AN_10GBT_CTRL		32
+#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN	12
 #define MDIO_AN_10GBT_STATUS		(33)
 #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
 #define MDIO_AN_10GBT_STATUS_MS_LBN     (14) /* MASTER/SLAVE config */


@@ -557,7 +557,7 @@ struct efx_mac_operations {
  * @poll: Poll for hardware state. Serialised by the mac_lock.
  * @get_settings: Get ethtool settings. Serialised by the mac_lock.
  * @set_settings: Set ethtool settings. Serialised by the mac_lock.
- * @set_xnp_advertise: Set abilities advertised in Extended Next Page
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
  *	(only needed where AN bit is set in mmds)
  * @num_tests: Number of PHY-specific tests/results
  * @test_names: Names of the tests/results
@@ -577,7 +577,7 @@ struct efx_phy_operations {
                          struct ethtool_cmd *ecmd);
     int (*set_settings) (struct efx_nic *efx,
                          struct ethtool_cmd *ecmd);
-    bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
+    void (*set_npage_adv) (struct efx_nic *efx, u32);
     u32 num_tests;
     const char *const *test_names;
     int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@@ -745,8 +745,7 @@ union efx_multicast_hash {
  *	&struct net_device_stats.
  * @stats_buffer: DMA buffer for statistics
  * @stats_lock: Statistics update lock. Serialises statistics fetches
- * @stats_enabled: Temporarily disable statistics fetches.
- *	Serialised by @stats_lock
+ * @stats_disable_count: Nest count for disabling statistics fetches
  * @mac_op: MAC interface
  * @mac_address: Permanent MAC address
  * @phy_type: PHY type
@@ -828,7 +827,7 @@ struct efx_nic {
     struct efx_mac_stats mac_stats;
     struct efx_buffer stats_buffer;
     spinlock_t stats_lock;
-    bool stats_enabled;
+    unsigned int stats_disable_count;
     struct efx_mac_operations *mac_op;
     unsigned char mac_address[ETH_ALEN];


@@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
 extern struct efx_phy_operations falcon_sft9001_phy_ops;
 extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
-extern void tenxpress_crc_err(struct efx_nic *efx);
 /****************************************************************************
  * Exported functions from the driver for XFP optical PHYs


@@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 {
     enum efx_loopback_mode loopback_mode = efx->loopback_mode;
     int phy_mode = efx->phy_mode;
+    enum reset_type reset_method = RESET_TYPE_INVISIBLE;
     struct ethtool_cmd ecmd;
     struct efx_channel *channel;
     int rc_test = 0, rc_reset = 0, rc;
@@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
     mutex_unlock(&efx->mac_lock);
     /* free up all consumers of SRAM (including all the queues) */
-    efx_reset_down(efx, &ecmd);
+    efx_reset_down(efx, reset_method, &ecmd);
     rc = efx_test_chip(efx, tests);
     if (rc && !rc_test)
         rc_test = rc;
     /* reset the chip to recover from the register test */
-    rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
+    rc_reset = falcon_reset_hw(efx, reset_method);
     /* Ensure that the phy is powered and out of loopback
      * for the bist and loopback tests */
     efx->phy_mode &= ~PHY_MODE_LOW_POWER;
     efx->loopback_mode = LOOPBACK_NONE;
-    rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
+    rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
     if (rc && !rc_reset)
         rc_reset = rc;


@@ -187,19 +187,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
 {
     efx_oword_t reg;
-    /* GPIO pins are also used for I2C, so block that temporarily */
+    /* GPIO 3 and the GPIO register are shared with I2C, so block that */
     mutex_lock(&efx->i2c_adap.bus_lock);
+    /* Pull RST_N (GPIO 2) low then let it up again, setting the
+     * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
+     * output enables; the output levels should always be 0 (low)
+     * and we rely on external pull-ups. */
     falcon_read(efx, &reg, GPIO_CTL_REG_KER);
     EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
-    EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
     falcon_write(efx, &reg, GPIO_CTL_REG_KER);
     msleep(1000);
-    EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
-    EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
-    EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
-                        !(efx->phy_mode & PHY_MODE_SPECIAL));
+    EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
+    EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
+                        !!(efx->phy_mode & PHY_MODE_SPECIAL));
     falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+    msleep(1);
     mutex_unlock(&efx->i2c_adap.bus_lock);
@@ -233,12 +236,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
     } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
         err = -EBUSY;
     } else {
+        /* Reset the PHY, reconfigure the MAC and enable/disable
+         * MAC stats accordingly. */
         efx->phy_mode = new_mode;
+        if (new_mode & PHY_MODE_SPECIAL)
+            efx_stats_disable(efx);
         if (efx->board_info.type == EFX_BOARD_SFE4001)
             err = sfe4001_poweron(efx);
         else
             err = sfn4111t_reset(efx);
         efx_reconfigure_port(efx);
+        if (!(new_mode & PHY_MODE_SPECIAL))
+            efx_stats_enable(efx);
     }
     rtnl_unlock();
@@ -327,6 +336,11 @@ int sfe4001_init(struct efx_nic *efx)
     efx->board_info.monitor = sfe4001_check_hw;
     efx->board_info.fini = sfe4001_fini;
+    if (efx->phy_mode & PHY_MODE_SPECIAL) {
+        /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+         * will fail. */
+        efx_stats_disable(efx);
+    }
     rc = sfe4001_poweron(efx);
     if (rc)
         goto fail_ioexp;
@@ -373,17 +387,25 @@ static void sfn4111t_fini(struct efx_nic *efx)
     i2c_unregister_device(efx->board_info.hwmon_client);
 }
-static struct i2c_board_info sfn4111t_hwmon_info = {
+static struct i2c_board_info sfn4111t_a0_hwmon_info = {
     I2C_BOARD_INFO("max6647", 0x4e),
     .irq = -1,
 };
+static struct i2c_board_info sfn4111t_r5_hwmon_info = {
+    I2C_BOARD_INFO("max6646", 0x4d),
+    .irq = -1,
+};
 int sfn4111t_init(struct efx_nic *efx)
 {
     int rc;
     efx->board_info.hwmon_client =
-        i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
+        i2c_new_device(&efx->i2c_adap,
+                       (efx->board_info.minor < 5) ?
+                       &sfn4111t_a0_hwmon_info :
+                       &sfn4111t_r5_hwmon_info);
     if (!efx->board_info.hwmon_client)
         return -EIO;
@@ -395,8 +417,10 @@ int sfn4111t_init(struct efx_nic *efx)
     if (rc)
         goto fail_hwmon;
-    if (efx->phy_mode & PHY_MODE_SPECIAL)
+    if (efx->phy_mode & PHY_MODE_SPECIAL) {
+        efx_stats_disable(efx);
         sfn4111t_reset(efx);
+    }
     return 0;


@@ -68,6 +68,8 @@
 #define PMA_PMD_EXT_CLK312_WIDTH 1
 #define PMA_PMD_EXT_LPOWER_LBN  12
 #define PMA_PMD_EXT_LPOWER_WIDTH 1
+#define PMA_PMD_EXT_ROBUST_LBN  14
+#define PMA_PMD_EXT_ROBUST_WIDTH 1
 #define PMA_PMD_EXT_SSR_LBN     15
 #define PMA_PMD_EXT_SSR_WIDTH   1
@@ -178,35 +180,24 @@
 #define C22EXT_STATUS_LINK_LBN  2
 #define C22EXT_STATUS_LINK_WIDTH 1
-#define C22EXT_MSTSLV_REG       49162
-#define C22EXT_MSTSLV_1000_HD_LBN  10
-#define C22EXT_MSTSLV_1000_HD_WIDTH 1
-#define C22EXT_MSTSLV_1000_FD_LBN  11
-#define C22EXT_MSTSLV_1000_FD_WIDTH 1
+#define C22EXT_MSTSLV_CTRL			49161
+#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN	8
+#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN	9
+#define C22EXT_MSTSLV_STATUS			49162
+#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN	10
+#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN	11
 /* Time to wait between powering down the LNPGA and turning off the power
  * rails */
 #define LNPGA_PDOWN_WAIT	(HZ / 5)
-static int crc_error_reset_threshold = 100;
-module_param(crc_error_reset_threshold, int, 0644);
-MODULE_PARM_DESC(crc_error_reset_threshold,
-                 "Max number of CRC errors before XAUI reset");
 struct tenxpress_phy_data {
     enum efx_loopback_mode loopback_mode;
-    atomic_t bad_crc_count;
     enum efx_phy_mode phy_mode;
     int bad_lp_tries;
 };
-void tenxpress_crc_err(struct efx_nic *efx)
-{
-    struct tenxpress_phy_data *phy_data = efx->phy_data;
-    if (phy_data != NULL)
-        atomic_inc(&phy_data->bad_crc_count);
-}
 static ssize_t show_phy_short_reach(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
@@ -285,7 +276,9 @@ static int tenxpress_init(struct efx_nic *efx)
                               PMA_PMD_XCONTROL_REG);
     reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
             (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
-            (1 << PMA_PMD_EXT_CLK312_LBN));
+            (1 << PMA_PMD_EXT_CLK312_LBN) |
+            (1 << PMA_PMD_EXT_ROBUST_LBN));
     mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
                         PMA_PMD_XCONTROL_REG, reg);
     mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@@ -347,6 +340,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
     rc = tenxpress_init(efx);
     if (rc < 0)
         goto fail;
+    mdio_clause45_set_pause(efx);
     if (efx->phy_type == PHY_TYPE_SFT9001B) {
         rc = device_create_file(&efx->pci_dev->dev,
@@ -377,8 +371,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
     /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
      * a special software reset can glitch the XGMAC sufficiently for stats
-     * requests to fail. Since we don't often special_reset, just lock. */
-    spin_lock(&efx->stats_lock);
+     * requests to fail. */
+    efx_stats_disable(efx);
     /* Initiate reset */
     reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -393,17 +387,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
     rc = mdio_clause45_wait_reset_mmds(efx,
                                        TENXPRESS_REQUIRED_DEVS);
     if (rc < 0)
-        goto unlock;
+        goto out;
     /* Try and reconfigure the device */
     rc = tenxpress_init(efx);
     if (rc < 0)
-        goto unlock;
+        goto out;
     /* Wait for the XGXS state machine to churn */
     mdelay(10);
-unlock:
-    spin_unlock(&efx->stats_lock);
+out:
+    efx_stats_enable(efx);
     return rc;
 }
@@ -521,7 +515,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 {
     struct tenxpress_phy_data *phy_data = efx->phy_data;
     struct ethtool_cmd ecmd;
-    bool phy_mode_change, loop_reset, loop_toggle, loopback;
+    bool phy_mode_change, loop_reset;
     if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
         phy_data->phy_mode = efx->phy_mode;
@@ -532,12 +526,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
     phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
                        phy_data->phy_mode != PHY_MODE_NORMAL);
-    loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
-    loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
     loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
                   LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
-    if (loop_reset || loop_toggle || loopback || phy_mode_change) {
+    if (loop_reset || phy_mode_change) {
         int rc;
         efx->phy_op->get_settings(efx, &ecmd);
@@ -552,20 +544,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
             falcon_reset_xaui(efx);
         }
-        if (efx->phy_type != PHY_TYPE_SFX7101) {
-            /* Only change autoneg once, on coming out or
-             * going into loopback */
-            if (loop_toggle)
-                ecmd.autoneg = !loopback;
-            if (loopback) {
-                ecmd.duplex = DUPLEX_FULL;
-                if (efx->loopback_mode == LOOPBACK_GPHY)
-                    ecmd.speed = SPEED_1000;
-                else
-                    ecmd.speed = SPEED_10000;
-            }
-        }
         rc = efx->phy_op->set_settings(efx, &ecmd);
         WARN_ON(rc);
     }
@@ -624,13 +602,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
     if (phy_data->phy_mode != PHY_MODE_NORMAL)
         return;
-    if (EFX_WORKAROUND_10750(efx) &&
-        atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
-        EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
-        falcon_reset_xaui(efx);
-        atomic_set(&phy_data->bad_crc_count, 0);
-    }
 }
 static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -773,107 +744,76 @@ reset:
     return rc;
 }
-static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-    int phy = efx->mii.phy_id;
-    u32 lpa = 0;
+    int phy_id = efx->mii.phy_id;
+    u32 adv = 0, lpa = 0;
     int reg;
     if (efx->phy_type != PHY_TYPE_SFX7101) {
-        reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
-                                 C22EXT_MSTSLV_REG);
-        if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
+        reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
                                 C22EXT_MSTSLV_CTRL);
+        if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
+            adv |= ADVERTISED_1000baseT_Full;
+        reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
+                                 C22EXT_MSTSLV_STATUS);
+        if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
            lpa |= ADVERTISED_1000baseT_Half;
-        if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
+        if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
            lpa |= ADVERTISED_1000baseT_Full;
     }
-    reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
+    reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+                             MDIO_AN_10GBT_CTRL);
+    if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
+        adv |= ADVERTISED_10000baseT_Full;
+    reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+                             MDIO_AN_10GBT_STATUS);
     if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
         lpa |= ADVERTISED_10000baseT_Full;
-    return lpa;
-}
-static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
-{
-    mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
-                                   tenxpress_get_xnp_lpa(efx));
-    ecmd->supported |= SUPPORTED_10000baseT_Full;
-    ecmd->advertising |= ADVERTISED_10000baseT_Full;
-}
-static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
-{
-    int phy_id = efx->mii.phy_id;
-    u32 xnp_adv = 0;
-    int reg;
-    reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
-                             PMA_PMD_SPEED_ENABLE_REG);
-    if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
-        xnp_adv |= ADVERTISED_100baseT_Full;
-    if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
-        xnp_adv |= ADVERTISED_1000baseT_Full;
-    if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
-        xnp_adv |= ADVERTISED_10000baseT_Full;
-    mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
-                                   tenxpress_get_xnp_lpa(efx));
-    ecmd->supported |= (SUPPORTED_100baseT_Half |
-                        SUPPORTED_100baseT_Full |
+    mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
+    if (efx->phy_type != PHY_TYPE_SFX7101)
+        ecmd->supported |= (SUPPORTED_100baseT_Full |
                         SUPPORTED_1000baseT_Full);
-    /* Use the vendor defined C22ext register for duplex settings */
-    if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
-        reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
-                                 GPHY_XCONTROL_REG);
-        ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
-                        DUPLEX_FULL : DUPLEX_HALF);
-    }
-}
+    /* In loopback, the PHY automatically brings up the correct interface,
+     * but doesn't advertise the correct speed. So override it */
+    if (efx->loopback_mode == LOOPBACK_GPHY)
+        ecmd->speed = SPEED_1000;
+    else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
+        ecmd->speed = SPEED_10000;
+}
-static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-    int phy_id = efx->mii.phy_id;
-    int rc;
-    rc = mdio_clause45_set_settings(efx, ecmd);
-    if (rc)
-        return rc;
-    if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
-        mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
-                               GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
-                               ecmd->duplex == DUPLEX_FULL);
-    return rc;
-}
-static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
-{
-    int phy = efx->mii.phy_id;
-    int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
-                                 PMA_PMD_SPEED_ENABLE_REG);
-    bool enabled;
-    reg &= ~((1 << 2) | (1 << 3));
-    if (EFX_WORKAROUND_13204(efx) &&
-        (advertising & ADVERTISED_100baseT_Full))
-        reg |= 1 << PMA_PMD_100TX_ADV_LBN;
-    if (advertising & ADVERTISED_1000baseT_Full)
-        reg |= 1 << PMA_PMD_1000T_ADV_LBN;
-    if (advertising & ADVERTISED_10000baseT_Full)
-        reg |= 1 << PMA_PMD_10000T_ADV_LBN;
-    mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
-                        PMA_PMD_SPEED_ENABLE_REG, reg);
-    enabled = (advertising &
-               (ADVERTISED_1000baseT_Half |
-                ADVERTISED_1000baseT_Full |
-                ADVERTISED_10000baseT_Full));
-    if (EFX_WORKAROUND_13204(efx))
-        enabled |= (advertising & ADVERTISED_100baseT_Full);
-    return enabled;
+    if (!ecmd->autoneg)
+        return -EINVAL;
+    return mdio_clause45_set_settings(efx, ecmd);
+}
+static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
+{
+    mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
+                           MDIO_AN_10GBT_CTRL,
+                           MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+                           advertising & ADVERTISED_10000baseT_Full);
+}
+static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
+{
+    int phy_id = efx->mii.phy_id;
+    mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
+                           C22EXT_MSTSLV_CTRL,
+                           C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
+                           advertising & ADVERTISED_1000baseT_Full);
+    mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
+                           MDIO_AN_10GBT_CTRL,
+                           MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+                           advertising & ADVERTISED_10000baseT_Full);
 }
 struct efx_phy_operations falcon_sfx7101_phy_ops = {
@@ -883,8 +823,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
     .poll             = tenxpress_phy_poll,
     .fini             = tenxpress_phy_fini,
     .clear_interrupt  = efx_port_dummy_op_void,
-    .get_settings     = sfx7101_get_settings,
-    .set_settings     = mdio_clause45_set_settings,
+    .get_settings     = tenxpress_get_settings,
+    .set_settings     = tenxpress_set_settings,
+    .set_npage_adv    = sfx7101_set_npage_adv,
     .num_tests        = ARRAY_SIZE(sfx7101_test_names),
     .test_names       = sfx7101_test_names,
     .run_tests        = sfx7101_run_tests,
@@ -899,9 +840,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
     .poll             = tenxpress_phy_poll,
     .fini             = tenxpress_phy_fini,
     .clear_interrupt  = efx_port_dummy_op_void,
-    .get_settings     = sft9001_get_settings,
-    .set_settings     = sft9001_set_settings,
-    .set_xnp_advertise = sft9001_set_xnp_advertise,
+    .get_settings     = tenxpress_get_settings,
+    .set_settings     = tenxpress_set_settings,
+    .set_npage_adv    = sft9001_set_npage_adv,
     .num_tests        = ARRAY_SIZE(sft9001_test_names),
     .test_names       = sft9001_test_names,
    .run_tests        = sft9001_run_tests,


@@ -18,8 +18,8 @@
 #define EFX_WORKAROUND_ALWAYS(efx) 1
 #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
 #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
-#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
-#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
+#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
+				     (efx)->phy_type == PHY_TYPE_SFT9001B)
 /* XAUI resets if link not detected */
 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -29,8 +29,6 @@
 #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
 /* TX pkt parser problem with <= 16 byte TXes */
 #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
-/* Low rate CRC errors require XAUI reset */
-#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
  * or a PCIe error (bug 11028) */
 #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@@ -55,8 +53,8 @@
 #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
 /* Need to send XNP pages for 100BaseT */
-#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
-/* Need to keep AN enabled */
-#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
+#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
+/* Don't restart AN in near-side loopback */
+#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
 #endif /* EFX_WORKAROUNDS_H */


@@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
         break;
     case SKFP_CLR_STATS:	/* Zero out the driver statistics */
         if (!capable(CAP_NET_ADMIN)) {
-            memset(&lp->MacStat, 0, sizeof(lp->MacStat));
-        } else {
             status = -EPERM;
+        } else {
+            memset(&lp->MacStat, 0, sizeof(lp->MacStat));
         }
         break;
     default:


@@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev)
     }
-    if (netif_msg_ifup(sky2))
-        printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
     netif_carrier_off(dev);
     /* must be power of 2 */
@@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev)
     sky2_write32(hw, B0_IMSK, imask);
     sky2_set_multicast(dev);
+    if (netif_msg_ifup(sky2))
+        printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
     return 0;
 err_out:


@@ -954,7 +954,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
     do {
         udelay(1);
         val = smsc911x_reg_read(pdata, RX_DP_CTRL);
-    } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
+    } while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));
     if (unlikely(timeout == 0))
         SMSC_WARNING(HW, "Timed out waiting for "


@@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev)
     /* test the IRQ connection to the ISR */
     smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
+    pd->software_irq_signal = false;
     spin_lock_irqsave(&pd->int_lock, flags);
     /* configure interrupt deassertion timer and enable interrupts */
@@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev)
     smsc9420_pci_flush_write(pd);
     timeout = 1000;
-    pd->software_irq_signal = false;
-    smp_wmb();
     while (timeout--) {
         if (pd->software_irq_signal)
             break;


@@ -9,6 +9,11 @@
     Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
     for more information on this driver.
+    DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
+    Hardware Reference Manual" is currently available at :
+    http://developer.intel.com/design/network/manuals/278074.htm
     Please submit bugs to http://bugzilla.kernel.org/ .
 */
@@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work)
     int csr12 = ioread32(ioaddr + CSR12);
     int next_tick = 60*HZ;
     int new_csr6 = 0;
+    int csr14 = ioread32(ioaddr + CSR14);
+    /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+    if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+        csr12 |= 6;
     if (tulip_debug > 2)
         printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
                dev->name, csr12, medianame[dev->if_port]);
@@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work)
         new_csr6 = 0x83860000;
         dev->if_port = 3;
         iowrite32(0, ioaddr + CSR13);
-        iowrite32(0x0003FF7F, ioaddr + CSR14);
+        iowrite32(0x0003FFFF, ioaddr + CSR14);
         iowrite16(8, ioaddr + CSR15);
         iowrite32(1, ioaddr + CSR13);
     }
@@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
     struct tulip_private *tp = netdev_priv(dev);
     void __iomem *ioaddr = tp->base_addr;
     int csr12 = ioread32(ioaddr + CSR12);
+    int csr14 = ioread32(ioaddr + CSR14);
+    /* CSR12[LS10,LS100] are not reliable during autonegotiation */
+    if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
+        csr12 |= 6;
     if (tulip_debug > 1)
         printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
-               "%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
+               "%8.8x.\n", dev->name, csr12, csr5, csr14);
     /* If NWay finished and we have a negotiated partner capability. */
     if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
         int negotiated = tp->sym_advertise & (csr12 >> 16);
         tp->lpar = csr12 >> 16;
         tp->nwayset = 1;
-        if (negotiated & 0x0100) dev->if_port = 5;
+        /* If partner cannot negotiate, it is 10Mbps Half Duplex */
+        if (!(csr12 & 0x8000)) dev->if_port = 0;
+        else if (negotiated & 0x0100) dev->if_port = 5;
         else if (negotiated & 0x0080) dev->if_port = 3;
         else if (negotiated & 0x0040) dev->if_port = 4;
         else if (negotiated & 0x0020) dev->if_port = 0;
@@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
             tp->timer.expires = RUN_AT(3*HZ);
             add_timer(&tp->timer);
         } else if (dev->if_port == 5)
-            iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+            iowrite32(csr14 & ~0x080, ioaddr + CSR14);
     } else if (dev->if_port == 0 || dev->if_port == 4) {
         if ((csr12 & 4) == 0)
             printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",


@@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev)
static int init_phy(struct net_device *dev)
{
struct ucc_geth_private *priv = netdev_priv(dev);
+struct device_node *np = priv->node;
+struct device_node *phy, *mdio;
+const phandle *ph;
+char bus_name[MII_BUS_ID_SIZE];
+const unsigned int *id;
struct phy_device *phydev;
char phy_id[BUS_ID_SIZE];
@@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
-snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
-priv->ug_info->phy_address);
+ph = of_get_property(np, "phy-handle", NULL);
+phy = of_find_node_by_phandle(*ph);
+mdio = of_get_parent(phy);
+id = of_get_property(phy, "reg", NULL);
+of_node_put(phy);
+of_node_put(mdio);
+uec_mdio_bus_name(bus_name, mdio);
+snprintf(phy_id, sizeof(phy_id), "%s:%02x",
+bus_name, *id);
phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
@@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth->ug_info = ug_info;
ugeth->dev = dev;
+ugeth->node = np;
return 0;
}


@@ -1186,6 +1186,8 @@ struct ucc_geth_private {
int oldspeed;
int oldduplex;
int oldlink;
+struct device_node *node;
};
void uec_set_ethtool_ops(struct net_device *netdev);


@@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
if (err)
goto reg_map_fail;
-snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
+uec_mdio_bus_name(new_bus->id, np);
new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
@@ -283,3 +283,13 @@ void uec_mdio_exit(void)
{
of_unregister_platform_driver(&uec_mdio_driver);
}
+void uec_mdio_bus_name(char *name, struct device_node *np)
+{
+const u32 *reg;
+reg = of_get_property(np, "reg", NULL);
+snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
+}


@@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
int __init uec_mdio_init(void);
void uec_mdio_exit(void);
+void uec_mdio_bus_name(char *name, struct device_node *np);
#endif /* __UEC_MII_H */


@@ -288,7 +288,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
-sg_init_one(sg, hdr, sizeof(*hdr));
+sg_set_buf(sg, hdr, sizeof(*hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -489,9 +489,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
-sg_init_one(sg, mhdr, sizeof(*mhdr));
+sg_set_buf(sg, mhdr, sizeof(*mhdr));
else
-sg_init_one(sg, hdr, sizeof(*hdr));
+sg_set_buf(sg, hdr, sizeof(*hdr));
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;


@@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset(
&fops_i2400m_reset);
}
-/*
- * Debug levels control; see debug.h
- */
-struct d_level D_LEVEL[] = {
-D_SUBMODULE_DEFINE(control),
-D_SUBMODULE_DEFINE(driver),
-D_SUBMODULE_DEFINE(debugfs),
-D_SUBMODULE_DEFINE(fw),
-D_SUBMODULE_DEFINE(netdev),
-D_SUBMODULE_DEFINE(rfkill),
-D_SUBMODULE_DEFINE(rx),
-D_SUBMODULE_DEFINE(tx),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \


@@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m)
EXPORT_SYMBOL_GPL(i2400m_release);
+/*
+ * Debug levels control; see debug.h
+ */
+struct d_level D_LEVEL[] = {
+D_SUBMODULE_DEFINE(control),
+D_SUBMODULE_DEFINE(driver),
+D_SUBMODULE_DEFINE(debugfs),
+D_SUBMODULE_DEFINE(fw),
+D_SUBMODULE_DEFINE(netdev),
+D_SUBMODULE_DEFINE(rfkill),
+D_SUBMODULE_DEFINE(rx),
+D_SUBMODULE_DEFINE(tx),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
static
int __init i2400m_driver_init(void)
{


@@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* it's done by reseting the chip. To accomplish this we must
* first cleanup any pending DMA, then restart stuff after a la
* ath5k_init.
+ *
+ * Called with sc->lock.
*/
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath5k_softc *sc = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
+int ret;
+mutex_lock(&sc->lock);
sc->bintval = conf->beacon_int;
sc->power_level = conf->power_level;
-return ath5k_chan_set(sc, conf->channel);
+ret = ath5k_chan_set(sc, conf->channel);
+mutex_unlock(&sc->lock);
+return ret;
}
static int


@@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv)
priv->ucode_data_backup.len = data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
+if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
+!priv->ucode_data_backup.v_addr)
+goto err_pci_alloc;
/* Initialization instructions and data */
if (init_size && init_data_size) {
priv->ucode_init.len = init_size;


@@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
cck_power = min(cck_power, (u8)11);
-ofdm_power = min(ofdm_power, (u8)35);
+if (ofdm_power > (u8)15)
+ofdm_power = 25;
+else
+ofdm_power += 10;
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
@@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
-ofdm_power = min(ofdm_power, (u8)15);
+if (ofdm_power > (u8)15)
+ofdm_power = 25;
+else
+ofdm_power += 10;
ofdm_power += priv->txpwr_base >> 4;
ofdm_power = min(ofdm_power, (u8)35);


@@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
* cannot be fixed without breaking all existing apps.
*/
case TUNSETIFF:
+case TUNGETIFF:
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
@@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM)
COMPATIBLE_IOCTL(TUNSETDEBUG)
COMPATIBLE_IOCTL(TUNSETPERSIST)
COMPATIBLE_IOCTL(TUNSETOWNER)
+COMPATIBLE_IOCTL(TUNSETLINK)
+COMPATIBLE_IOCTL(TUNSETGROUP)
+COMPATIBLE_IOCTL(TUNGETFEATURES)
+COMPATIBLE_IOCTL(TUNSETOFFLOAD)
+COMPATIBLE_IOCTL(TUNSETTXFILTER)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
+HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)


@@ -183,7 +183,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
-if (sizeof(rwlock_t) != 0) {
+if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));


@@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return 0;
next_skb:
-block_limit = skb_headlen(st->cur_skb);
+block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit) {
-*data = st->cur_skb->data + abs_offset;
+*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
@@ -2250,13 +2250,14 @@ next_skb:
st->frag_data = NULL;
}
-if (st->cur_skb->next) {
-st->cur_skb = st->cur_skb->next;
-st->frag_idx = 0;
-goto next_skb;
-} else if (st->root_skb == st->cur_skb &&
+if (st->root_skb == st->cur_skb &&
skb_shinfo(st->root_skb)->frag_list) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+st->frag_idx = 0;
+goto next_skb;
+} else if (st->cur_skb->next) {
+st->cur_skb = st->cur_skb->next;
+st->frag_idx = 0;
goto next_skb;
}


@@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
static int __init ip_auto_config(void)
{
__be32 addr;
+#ifdef IPCONFIG_DYNAMIC
+int retries = CONF_OPEN_RETRIES;
+#endif
#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
-int retries = CONF_OPEN_RETRIES;
if (ic_dynamic() < 0) {
ic_close_devs();


@@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;
-ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
+ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
+tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;


@@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
+#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
+unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
@@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
-sk2->sk_hash == num &&
+(bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-(*saddr_comp)(sk, sk2))
+(*saddr_comp)(sk, sk2)) {
+if (bitmap)
+__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
+bitmap);
+else
return 1;
+}
return 0;
}
@@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) {
int low, high, remaining;
unsigned rand;
-unsigned short first;
+unsigned short first, last;
+DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
rand = net_random();
-snum = first = rand % remaining + low;
-rand |= 1;
-for (;;) {
-hslot = &udptable->hash[udp_hashfn(net, snum)];
+first = (((u64)rand * remaining) >> 32) + low;
+/*
+ * force rand to be an odd multiple of UDP_HTABLE_SIZE
+ */
+rand = (rand | 1) * UDP_HTABLE_SIZE;
+for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
+hslot = &udptable->hash[udp_hashfn(net, first)];
+bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
-if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
-break;
-spin_unlock_bh(&hslot->lock);
+udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
+saddr_comp);
+snum = first;
+/*
+ * Iterate on all possible values of snum for this hash.
+ * Using steps of an odd multiple of UDP_HTABLE_SIZE
+ * give us randomization and full range coverage.
+ */
do {
-snum = snum + rand;
-} while (snum < low || snum > high);
-if (snum == first)
-goto fail;
+if (low <= snum && snum <= high &&
+!test_bit(snum / UDP_HTABLE_SIZE, bitmap))
+goto found;
+snum += rand;
+} while (snum != first);
+spin_unlock_bh(&hslot->lock);
}
+goto fail;
} else {
hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock);
-if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
+if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock;
}
+found:
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {


@@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
.procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int),
-.mode = 0644,
+.mode = 0444,
.proc_handler = proc_dointvec,
},
#endif


@@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed;
-if (ip6_dst_lookup(sk, &dst2, &fl))
+if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed;
-err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
+err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
dst_release(dst);


@@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
+!(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate
@@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
}
if (skb2) {
-skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2);
}
}


@@ -365,7 +365,9 @@ static int pim6_rcv(struct sk_buff *skb)
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
-(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
+(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+sizeof(*pim), IPPROTO_PIM,
+csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
@@ -392,7 +394,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
-skb->protocol = htons(ETH_P_IP);
+skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
@@ -481,6 +483,7 @@ static int mif6_delete(struct net *net, int vifi)
{
struct mif_device *v;
struct net_device *dev;
+struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL;
@@ -513,6 +516,10 @@ static int mif6_delete(struct net *net, int vifi)
dev_set_allmulti(dev, -1);
+in6_dev = __in6_dev_get(dev);
+if (in6_dev)
+in6_dev->cnf.mc_forwarding--;
if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev);
@@ -622,6 +629,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev;
+struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
@@ -662,6 +670,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
return -EINVAL;
}
+in6_dev = __in6_dev_get(dev);
+if (in6_dev)
+in6_dev->cnf.mc_forwarding++;
/*
* Fill in the VIF structures
*/
@@ -838,8 +850,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY;
-skb_pull(skb, sizeof(struct ipv6hdr));
}
if (net->ipv6.mroute6_sk == NULL) {
@@ -1222,8 +1232,10 @@ static int ip6mr_sk_init(struct sock *sk)
rtnl_lock();
write_lock_bh(&mrt_lock);
-if (likely(net->ipv6.mroute6_sk == NULL))
+if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk;
+net->ipv6.devconf_all->mc_forwarding++;
+}
else
err = -EADDRINUSE;
write_unlock_bh(&mrt_lock);
@@ -1242,6 +1254,7 @@ int ip6mr_sk_done(struct sock *sk)
if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL;
+net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(net);


@@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
.proto = iph->nexthdr,
};
-if (rt6_need_strict(&iph->daddr))
+if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);


@@ -77,6 +77,7 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
#endif
struct packet_type prot_hook;
spinlock_t bind_lock;
+struct mutex pg_vec_lock;
unsigned int running:1, /* prot_hook is attached*/
auxdata:1,
origdev:1;
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
*/
spin_lock_init(&po->bind_lock);
+mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
synchronize_net();
err = -EBUSY;
+mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
}
+mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
size = vma->vm_end - vma->vm_start;
-lock_sock(sk);
+mutex_lock(&po->pg_vec_lock);
if (po->pg_vec == NULL)
goto out;
if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
err = 0;
out:
-release_sock(sk);
+mutex_unlock(&po->pg_vec_lock);
return err;
}
#endif


@@ -28,17 +28,6 @@
#include "debug-levels.h"
-/* Debug framework control of debug levels */
-struct d_level D_LEVEL[] = {
-D_SUBMODULE_DEFINE(debugfs),
-D_SUBMODULE_DEFINE(id_table),
-D_SUBMODULE_DEFINE(op_msg),
-D_SUBMODULE_DEFINE(op_reset),
-D_SUBMODULE_DEFINE(op_rfkill),
-D_SUBMODULE_DEFINE(stack),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \


@@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+D_SUBMODULE_DEFINE(debugfs),
+D_SUBMODULE_DEFINE(id_table),
+D_SUBMODULE_DEFINE(op_msg),
+D_SUBMODULE_DEFINE(op_reset),
+D_SUBMODULE_DEFINE(op_rfkill),
+D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE,
.name = "WiMAX",


@@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
* calculate the number of reg rules we will need. We will need one
* for each channel subband */
while (country_ie_len >= 3) {
+int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0;
@@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+/* 2 GHz */
+if (triplet->chans.first_channel <= 14)
+end_channel = triplet->chans.first_channel +
+triplet->chans.num_channels;
+else
+/*
+ * 5 GHz -- For example in country IEs if the first
+ * channel given is 36 and the number of channels is 4
+ * then the individual channel numbers defined for the
+ * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
+ * and not 36, 37, 38, 39.
+ *
+ * See: http://tinyurl.com/11d-clarification
+ */
+end_channel = triplet->chans.first_channel +
+(4 * (triplet->chans.num_channels - 1));
cur_channel = triplet->chans.first_channel;
-cur_sub_max_channel = ieee80211_channel_to_frequency(
-cur_channel + triplet->chans.num_channels);
+cur_sub_max_channel = end_channel;
/* Basic sanity check */
if (cur_sub_max_channel < cur_channel)
@@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd(
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
-/*
- * 5 GHz -- For example in country IEs if the first
- * channel given is 36 and the number of channels is 4
- * then the individual channel numbers defined for the
- * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
- * and not 36, 37, 38, 39.
- *
- * See: http://tinyurl.com/11d-clarification
- */
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
@@ -1276,7 +1284,7 @@ static void reg_country_ie_process_debug(
if (intersected_rd) {
printk(KERN_DEBUG "cfg80211: We intersect both of these "
"and get:\n");
-print_regdomain_info(rd);
+print_regdomain_info(intersected_rd);
return;
}
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");