Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/e1000/e1000_main.c
David S. Miller 2009-01-30 14:31:07 -08:00
commit 05bee47377
49 changed files with 515 additions and 427 deletions


@ -2,14 +2,14 @@
IP-Aliasing:
============
IP-aliases are additional IP-addresses/masks hooked up to a base
interface by adding a colon and a string when running ifconfig.
IP-aliases are an obsolete way to manage multiple IP-addresses/masks
per interface. Newer tools such as iproute2 support multiple
address/prefixes per interface, but aliases are still supported
for backwards compatibility.
An alias is formed by adding a colon and a string when running ifconfig.
This string is usually numeric, but this is not a must.
IP-Aliases are available if CONFIG_INET (`standard' IPv4 networking)
is configured in the kernel.
o Alias creation.
Alias creation is done by 'magic' interface naming: eg. to create a
200.1.1.1 alias for eth0 ...
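For illustration (this simply follows the naming rule just described; the full
document's own example may word it differently), the alias would be created
with:

    # ifconfig eth0:0 200.1.1.1

and removed again by shutting the alias interface down:

    # ifconfig eth0:0 down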
@ -38,16 +38,3 @@ o Relationship with main device
If the base device is shut down the added aliases will be deleted
too.
Contact
-------
Please finger or e-mail me:
Juan Jose Ciarlante <jjciarla@raiz.uncu.edu.ar>
Updated by Erik Schoenfelder <schoenfr@gaertner.DE>
; local variables:
; mode: indented-text
; mode: auto-fill
; end:


@ -2836,8 +2836,6 @@ S: Maintained
MAC80211
P: Johannes Berg
M: johannes@sipsolutions.net
P: Michael Wu
M: flamingice@sourmilk.net
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git


@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k2-NAPI"
#define DRV_VERSION "7.3.21-k3-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@ -3697,7 +3697,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 rctl, icr = er32(ICR);
if (unlikely(!icr))
if (unlikely((!icr) || test_bit(__E1000_RESETTING, &adapter->flags)))
return IRQ_NONE; /* Not our interrupt */
/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is


@ -234,6 +234,8 @@ static int gfar_mdio_probe(struct of_device *ofdev,
if (NULL == new_bus)
return -ENOMEM;
device_init_wakeup(&ofdev->dev, 1);
new_bus->name = "Gianfar MII Bus",
new_bus->read = &gfar_mdio_read,
new_bus->write = &gfar_mdio_write,


@ -210,7 +210,7 @@
#define MAX_CMD_DESCRIPTORS_HOST 1024
#define MAX_RCV_DESCRIPTORS_1G 2048
#define MAX_RCV_DESCRIPTORS_10G 4096
#define MAX_JUMBO_RCV_DESCRIPTORS 512
#define MAX_JUMBO_RCV_DESCRIPTORS 1024
#define MAX_LRO_RCV_DESCRIPTORS 8
#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS


@ -947,8 +947,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
}
for (i = 0; i < n; i++) {
if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0)
netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
kfree(buf);
return -EIO;
}
buf[i].addr = addr;
buf[i].data = val;


@ -438,7 +438,6 @@ static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
struct pci_dev *pdev = lp->pdev;
int limit = 2048;
u16 *adrp;
u16 cmd;


@ -675,9 +675,8 @@ static int efx_init_port(struct efx_nic *efx)
rc = efx->phy_op->init(efx);
if (rc)
return rc;
efx->phy_op->reconfigure(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
rc = falcon_switch_mac(efx);
mutex_unlock(&efx->mac_lock);
if (rc)
@ -685,7 +684,7 @@ static int efx_init_port(struct efx_nic *efx)
efx->mac_op->reconfigure(efx);
efx->port_initialized = true;
efx->stats_enabled = true;
efx_stats_enable(efx);
return 0;
fail:
@ -734,6 +733,7 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
efx_stats_disable(efx);
efx->phy_op->fini(efx);
efx->port_initialized = false;
@ -1352,6 +1352,20 @@ static int efx_net_stop(struct net_device *net_dev)
return 0;
}
void efx_stats_disable(struct efx_nic *efx)
{
spin_lock(&efx->stats_lock);
++efx->stats_disable_count;
spin_unlock(&efx->stats_lock);
}
void efx_stats_enable(struct efx_nic *efx)
{
spin_lock(&efx->stats_lock);
--efx->stats_disable_count;
spin_unlock(&efx->stats_lock);
}
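The two helpers above replace the old stats_enabled boolean with a nest count,
so that the reset path, MAC switching and the special PHY modes can each
suppress MAC statistics fetches without re-enabling them underneath one
another. A rough user-space analogue of the pattern, with a pthread mutex
standing in for the driver's stats_lock spinlock; names and output are
illustrative only, not part of the driver:

#include <pthread.h>
#include <stdio.h>

/* Illustrative only -- not the sfc driver.  A counter, not a bool, so that
 * nested disable/enable pairs from different callers compose correctly. */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int stats_disable_count = 1;  /* start disabled, as efx_init_struct() does */

static void stats_disable(void)
{
	pthread_mutex_lock(&stats_lock);
	++stats_disable_count;
	pthread_mutex_unlock(&stats_lock);
}

static void stats_enable(void)
{
	pthread_mutex_lock(&stats_lock);
	--stats_disable_count;
	pthread_mutex_unlock(&stats_lock);
}

static void maybe_fetch_stats(void)
{
	if (pthread_mutex_trylock(&stats_lock) != 0)
		return;  /* another fetch in progress; stale stats are acceptable */
	if (stats_disable_count == 0)
		printf("fetching MAC stats\n");  /* hardware access would go here */
	else
		printf("fetch skipped (%u disables outstanding)\n", stats_disable_count);
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	stats_enable();       /* balances the initial count of 1 (port brought up) */
	stats_disable();      /* e.g. reset path */
	stats_disable();      /* e.g. MAC switch nested inside the reset */
	maybe_fetch_stats();  /* skipped: count is 2 */
	stats_enable();
	stats_enable();
	maybe_fetch_stats();  /* runs: count is back to 0 */
	return 0;
}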
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
{
@ -1360,12 +1374,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
struct net_device_stats *stats = &net_dev->stats;
/* Update stats if possible, but do not wait if another thread
* is updating them (or resetting the NIC); slightly stale
* stats are acceptable.
* is updating them or if MAC stats fetches are temporarily
* disabled; slightly stale stats are acceptable.
*/
if (!spin_trylock(&efx->stats_lock))
return stats;
if (efx->stats_enabled) {
if (!efx->stats_disable_count) {
efx->mac_op->update_stats(efx);
falcon_update_nic_stats(efx);
}
@ -1613,16 +1627,12 @@ static void efx_unregister_netdev(struct efx_nic *efx)
/* Tears down the entire software state and most of the hardware state
* before reset. */
void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
void efx_reset_down(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd)
{
EFX_ASSERT_RESET_SERIALISED(efx);
/* The net_dev->get_stats handler is quite slow, and will fail
* if a fetch is pending over reset. Serialise against it. */
spin_lock(&efx->stats_lock);
efx->stats_enabled = false;
spin_unlock(&efx->stats_lock);
efx_stats_disable(efx);
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
mutex_lock(&efx->spi_lock);
@ -1630,6 +1640,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
efx->phy_op->get_settings(efx, ecmd);
efx_fini_channels(efx);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx);
}
/* This function will always ensure that the locks acquired in
@ -1637,7 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
int efx_reset_up(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd, bool ok)
{
int rc;
@ -1649,6 +1662,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
ok = false;
}
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
if (ok) {
rc = efx->phy_op->init(efx);
if (rc)
ok = false;
} else
efx->port_initialized = false;
}
if (ok) {
efx_init_channels(efx);
@ -1661,7 +1683,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
if (ok) {
efx_start_all(efx);
efx->stats_enabled = true;
efx_stats_enable(efx);
}
return rc;
}
@ -1693,7 +1715,7 @@ static int efx_reset(struct efx_nic *efx)
EFX_INFO(efx, "resetting (%d)\n", method);
efx_reset_down(efx, &ecmd);
efx_reset_down(efx, method, &ecmd);
rc = falcon_reset_hw(efx, method);
if (rc) {
@ -1712,10 +1734,10 @@ static int efx_reset(struct efx_nic *efx)
/* Leave device stopped if necessary */
if (method == RESET_TYPE_DISABLE) {
efx_reset_up(efx, &ecmd, false);
efx_reset_up(efx, method, &ecmd, false);
rc = -EIO;
} else {
rc = efx_reset_up(efx, &ecmd, true);
rc = efx_reset_up(efx, method, &ecmd, true);
}
out_disable:
@ -1867,6 +1889,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
efx->rx_checksum_enabled = true;
spin_lock_init(&efx->netif_stop_lock);
spin_lock_init(&efx->stats_lock);
efx->stats_disable_count = 1;
mutex_init(&efx->mac_lock);
efx->mac_op = &efx_dummy_mac_operations;
efx->phy_op = &efx_dummy_phy_operations;


@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel);
extern void efx_flush_queues(struct efx_nic *efx);
/* Ports */
extern void efx_stats_disable(struct efx_nic *efx);
extern void efx_stats_enable(struct efx_nic *efx);
extern void efx_reconfigure_port(struct efx_nic *efx);
extern void __efx_reconfigure_port(struct efx_nic *efx);
/* Reset handling */
extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
bool ok);
extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd);
extern int efx_reset_up(struct efx_nic *efx, enum reset_type method,
struct ethtool_cmd *ecmd, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);


@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
return -EINVAL;
/* Falcon GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"


@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
efx->phy_type == PHY_TYPE_SFX7101))
tenxpress_crc_err(efx);
}
/* Handle receive events that are not in-order. */
@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
/* MAC stats will fail whilst the TX fifo is draining. Serialise
* the drain sequence with the statistics fetch */
spin_lock(&efx->stats_lock);
efx_stats_disable(efx);
falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx)
udelay(10);
}
spin_unlock(&efx->stats_lock);
efx_stats_enable(efx);
/* If we've reset the EM block and the link is up, then
* we'll have to kick the XAUI link so the PHY can recover */
@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx)
struct efx_mac_operations *old_mac_op = efx->mac_op;
efx_oword_t nic_stat;
unsigned strap_val;
int rc = 0;
/* Don't try to fetch MAC stats while we're switching MACs */
efx_stats_disable(efx);
/* Internal loopbacks override the phy speed setting */
if (efx->loopback_mode == LOOPBACK_GMAC) {
@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx)
efx->link_fd = true;
}
WARN_ON(!mutex_is_locked(&efx->mac_lock));
efx->mac_op = (EFX_IS10G(efx) ?
&falcon_xmac_operations : &falcon_gmac_operations);
if (old_mac_op == efx->mac_op)
return 0;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
/* Not all macs support a mac-level link state */
efx->mac_up = true;
/* Always push the NIC_STAT_REG setting even if the mac hasn't
* changed, because this function is run post online reset */
falcon_read(efx, &nic_stat, NIC_STAT_REG);
strap_val = EFX_IS10G(efx) ? 5 : 3;
if (falcon_rev(efx) >= FALCON_REV_B0) {
@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx)
BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
}
if (old_mac_op == efx->mac_op)
goto out;
EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
return falcon_reset_macs(efx);
/* Not all macs support a mac-level link state */
efx->mac_up = true;
rc = falcon_reset_macs(efx);
out:
efx_stats_enable(efx);
return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations */


@ -15,6 +15,7 @@
#include "net_driver.h"
#include "mdio_10g.h"
#include "boards.h"
#include "workarounds.h"
int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
int spins, int spintime)
@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
return false;
else if (efx_phy_mode_disabled(efx->phy_mode))
return false;
else if (efx->loopback_mode == LOOPBACK_PHYXS) {
else if (efx->loopback_mode == LOOPBACK_PHYXS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
if (!mmd_mask) {
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
MDIO_PHYXS_STATUS2);
return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
}
} else if (efx->loopback_mode == LOOPBACK_PCS)
else if (efx->loopback_mode == LOOPBACK_PCS)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
MDIO_MMDREG_DEVS_AN);
if (!mmd_mask) {
/* Use presence of XGMII faults in lieu of link state */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
MDIO_PHYXS_STATUS2);
return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
}
while (mmd_mask) {
if (mmd_mask & 1) {
/* Double reads because link state is latched, and a
@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
}
}
static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr)
{
int phy_id = efx->mii.phy_id;
u32 result = 0;
@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
result |= ADVERTISED_100baseT_Half;
if (reg & ADVERTISE_100FULL)
result |= ADVERTISED_100baseT_Full;
if (reg & LPA_RESV)
result |= xnp;
return result;
}
@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx,
*/
void mdio_clause45_get_settings_ext(struct efx_nic *efx,
struct ethtool_cmd *ecmd,
u32 xnp, u32 xnp_lpa)
u32 npage_adv, u32 npage_lpa)
{
int phy_id = efx->mii.phy_id;
int reg;
@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |=
ADVERTISED_Autoneg |
mdio_clause45_get_an(efx,
MDIO_AN_ADVERTISE, xnp);
mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) |
npage_adv;
} else
ecmd->autoneg = AUTONEG_DISABLE;
} else
@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx,
if (ecmd->autoneg) {
/* If AN is complete, report best common mode,
* otherwise report best advertised mode. */
u32 common = ecmd->advertising;
u32 modes = 0;
if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_STAT1) &
(1 << MDIO_AN_STATUS_AN_DONE_LBN)) {
common &= mdio_clause45_get_an(efx, MDIO_AN_LPA,
xnp_lpa);
}
if (common & ADVERTISED_10000baseT_Full) {
(1 << MDIO_AN_STATUS_AN_DONE_LBN))
modes = (ecmd->advertising &
(mdio_clause45_get_an(efx, MDIO_AN_LPA) |
npage_lpa));
if (modes == 0)
modes = ecmd->advertising;
if (modes & ADVERTISED_10000baseT_Full) {
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
} else if (common & (ADVERTISED_1000baseT_Full |
} else if (modes & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half)) {
ecmd->speed = SPEED_1000;
ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full);
} else if (common & (ADVERTISED_100baseT_Full |
ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full);
} else if (modes & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
ecmd->speed = SPEED_100;
ecmd->duplex = !!(common & ADVERTISED_100baseT_Full);
ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
} else {
ecmd->speed = SPEED_10;
ecmd->duplex = !!(common & ADVERTISED_10baseT_Full);
ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
}
} else {
/* Report forced settings */
@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
int phy_id = efx->mii.phy_id;
struct ethtool_cmd prev;
u32 required;
int ctrl1_bits, reg;
int reg;
efx->phy_op->get_settings(efx, &prev);
@ -430,73 +433,32 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
if (prev.port != PORT_TP || ecmd->port != PORT_TP)
return -EINVAL;
/* Check that PHY supports these settings and work out the
* basic control bits */
if (ecmd->duplex) {
/* Check that PHY supports these settings */
if (ecmd->autoneg) {
required = SUPPORTED_Autoneg;
} else if (ecmd->duplex) {
switch (ecmd->speed) {
case SPEED_10:
ctrl1_bits = BMCR_FULLDPLX;
required = SUPPORTED_10baseT_Full;
break;
case SPEED_100:
ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
required = SUPPORTED_100baseT_Full;
break;
case SPEED_1000:
ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
required = SUPPORTED_1000baseT_Full;
break;
case SPEED_10000:
ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
BMCR_FULLDPLX);
required = SUPPORTED_10000baseT_Full;
break;
default:
return -EINVAL;
case SPEED_10: required = SUPPORTED_10baseT_Full; break;
case SPEED_100: required = SUPPORTED_100baseT_Full; break;
default: return -EINVAL;
}
} else {
switch (ecmd->speed) {
case SPEED_10:
ctrl1_bits = 0;
required = SUPPORTED_10baseT_Half;
break;
case SPEED_100:
ctrl1_bits = BMCR_SPEED100;
required = SUPPORTED_100baseT_Half;
break;
case SPEED_1000:
ctrl1_bits = BMCR_SPEED1000;
required = SUPPORTED_1000baseT_Half;
break;
default:
return -EINVAL;
case SPEED_10: required = SUPPORTED_10baseT_Half; break;
case SPEED_100: required = SUPPORTED_100baseT_Half; break;
default: return -EINVAL;
}
}
if (ecmd->autoneg)
required |= SUPPORTED_Autoneg;
required |= ecmd->advertising;
if (required & ~prev.supported)
return -EINVAL;
/* Set the basic control bits */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1);
reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c);
reg |= ctrl1_bits;
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1,
reg);
/* Set the AN registers */
if (ecmd->autoneg != prev.autoneg ||
ecmd->advertising != prev.advertising) {
bool xnp = false;
if (efx->phy_op->set_xnp_advertise)
xnp = efx->phy_op->set_xnp_advertise(efx,
ecmd->advertising);
if (ecmd->autoneg) {
reg = 0;
bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
|| EFX_WORKAROUND_13204(efx));
/* Set up the base page */
reg = ADVERTISE_CSMA;
if (ecmd->advertising & ADVERTISED_10baseT_Half)
reg |= ADVERTISE_10HALF;
if (ecmd->advertising & ADVERTISED_10baseT_Full)
@ -507,22 +469,47 @@ int mdio_clause45_set_settings(struct efx_nic *efx,
reg |= ADVERTISE_100FULL;
if (xnp)
reg |= ADVERTISE_RESV;
else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
reg |= ADVERTISE_NPAGE;
reg |= efx_fc_advertise(efx->wanted_fc);
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_ADVERTISE, reg);
}
/* Set up the (extended) next page if necessary */
if (efx->phy_op->set_npage_adv)
efx->phy_op->set_npage_adv(efx, ecmd->advertising);
/* Enable and restart AN */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1);
if (ecmd->autoneg)
reg |= BMCR_ANENABLE | BMCR_ANRESTART;
else
reg &= ~BMCR_ANENABLE;
reg |= BMCR_ANENABLE;
if (!(EFX_WORKAROUND_15195(efx) &&
LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
reg |= BMCR_ANRESTART;
if (xnp)
reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
else
reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1, reg);
} else {
/* Disable AN */
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
MDIO_MMDREG_CTRL1,
__ffs(BMCR_ANENABLE), false);
/* Set the basic control bits */
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1);
reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX |
0x003c);
if (ecmd->speed == SPEED_100)
reg |= BMCR_SPEED100;
if (ecmd->duplex)
reg |= BMCR_FULLDPLX;
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
MDIO_MMDREG_CTRL1, reg);
}
return 0;


@ -155,7 +155,8 @@
#define MDIO_AN_XNP 22
#define MDIO_AN_LPA_XNP 25
#define MDIO_AN_10GBT_ADVERTISE 32
#define MDIO_AN_10GBT_CTRL 32
#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12
#define MDIO_AN_10GBT_STATUS (33)
#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */


@ -557,7 +557,7 @@ struct efx_mac_operations {
* @poll: Poll for hardware state. Serialised by the mac_lock.
* @get_settings: Get ethtool settings. Serialised by the mac_lock.
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_xnp_advertise: Set abilities advertised in Extended Next Page
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
* @num_tests: Number of PHY-specific tests/results
* @test_names: Names of the tests/results
@ -577,7 +577,7 @@ struct efx_phy_operations {
struct ethtool_cmd *ecmd);
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
void (*set_npage_adv) (struct efx_nic *efx, u32);
u32 num_tests;
const char *const *test_names;
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@ -745,8 +745,7 @@ union efx_multicast_hash {
* &struct net_device_stats.
* @stats_buffer: DMA buffer for statistics
* @stats_lock: Statistics update lock. Serialises statistics fetches
* @stats_enabled: Temporarily disable statistics fetches.
* Serialised by @stats_lock
* @stats_disable_count: Nest count for disabling statistics fetches
* @mac_op: MAC interface
* @mac_address: Permanent MAC address
* @phy_type: PHY type
@ -828,7 +827,7 @@ struct efx_nic {
struct efx_mac_stats mac_stats;
struct efx_buffer stats_buffer;
spinlock_t stats_lock;
bool stats_enabled;
unsigned int stats_disable_count;
struct efx_mac_operations *mac_op;
unsigned char mac_address[ETH_ALEN];


@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
extern struct efx_phy_operations falcon_sft9001_phy_ops;
extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
extern void tenxpress_crc_err(struct efx_nic *efx);
/****************************************************************************
* Exported functions from the driver for XFP optical PHYs


@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
enum reset_type reset_method = RESET_TYPE_INVISIBLE;
struct ethtool_cmd ecmd;
struct efx_channel *channel;
int rc_test = 0, rc_reset = 0, rc;
@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
mutex_unlock(&efx->mac_lock);
/* free up all consumers of SRAM (including all the queues) */
efx_reset_down(efx, &ecmd);
efx_reset_down(efx, reset_method, &ecmd);
rc = efx_test_chip(efx, tests);
if (rc && !rc_test)
rc_test = rc;
/* reset the chip to recover from the register test */
rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
rc_reset = falcon_reset_hw(efx, reset_method);
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
if (rc && !rc_reset)
rc_reset = rc;


@ -187,19 +187,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
{
efx_oword_t reg;
/* GPIO pins are also used for I2C, so block that temporarily */
/* GPIO 3 and the GPIO register are shared with I2C, so block that */
mutex_lock(&efx->i2c_adap.bus_lock);
/* Pull RST_N (GPIO 2) low then let it up again, setting the
* FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
* output enables; the output levels should always be 0 (low)
* and we rely on external pull-ups. */
falcon_read(efx, &reg, GPIO_CTL_REG_KER);
EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
msleep(1000);
EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
!(efx->phy_mode & PHY_MODE_SPECIAL));
EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
!!(efx->phy_mode & PHY_MODE_SPECIAL));
falcon_write(efx, &reg, GPIO_CTL_REG_KER);
msleep(1);
mutex_unlock(&efx->i2c_adap.bus_lock);
@ -233,12 +236,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
err = -EBUSY;
} else {
/* Reset the PHY, reconfigure the MAC and enable/disable
* MAC stats accordingly. */
efx->phy_mode = new_mode;
if (new_mode & PHY_MODE_SPECIAL)
efx_stats_disable(efx);
if (efx->board_info.type == EFX_BOARD_SFE4001)
err = sfe4001_poweron(efx);
else
err = sfn4111t_reset(efx);
efx_reconfigure_port(efx);
if (!(new_mode & PHY_MODE_SPECIAL))
efx_stats_enable(efx);
}
rtnl_unlock();
@ -327,6 +336,11 @@ int sfe4001_init(struct efx_nic *efx)
efx->board_info.monitor = sfe4001_check_hw;
efx->board_info.fini = sfe4001_fini;
if (efx->phy_mode & PHY_MODE_SPECIAL) {
/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
* will fail. */
efx_stats_disable(efx);
}
rc = sfe4001_poweron(efx);
if (rc)
goto fail_ioexp;
@ -373,17 +387,25 @@ static void sfn4111t_fini(struct efx_nic *efx)
i2c_unregister_device(efx->board_info.hwmon_client);
}
static struct i2c_board_info sfn4111t_hwmon_info = {
static struct i2c_board_info sfn4111t_a0_hwmon_info = {
I2C_BOARD_INFO("max6647", 0x4e),
.irq = -1,
};
static struct i2c_board_info sfn4111t_r5_hwmon_info = {
I2C_BOARD_INFO("max6646", 0x4d),
.irq = -1,
};
int sfn4111t_init(struct efx_nic *efx)
{
int rc;
efx->board_info.hwmon_client =
i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
i2c_new_device(&efx->i2c_adap,
(efx->board_info.minor < 5) ?
&sfn4111t_a0_hwmon_info :
&sfn4111t_r5_hwmon_info);
if (!efx->board_info.hwmon_client)
return -EIO;
@ -395,8 +417,10 @@ int sfn4111t_init(struct efx_nic *efx)
if (rc)
goto fail_hwmon;
if (efx->phy_mode & PHY_MODE_SPECIAL)
if (efx->phy_mode & PHY_MODE_SPECIAL) {
efx_stats_disable(efx);
sfn4111t_reset(efx);
}
return 0;


@ -68,6 +68,8 @@
#define PMA_PMD_EXT_CLK312_WIDTH 1
#define PMA_PMD_EXT_LPOWER_LBN 12
#define PMA_PMD_EXT_LPOWER_WIDTH 1
#define PMA_PMD_EXT_ROBUST_LBN 14
#define PMA_PMD_EXT_ROBUST_WIDTH 1
#define PMA_PMD_EXT_SSR_LBN 15
#define PMA_PMD_EXT_SSR_WIDTH 1
@ -178,35 +180,24 @@
#define C22EXT_STATUS_LINK_LBN 2
#define C22EXT_STATUS_LINK_WIDTH 1
#define C22EXT_MSTSLV_REG 49162
#define C22EXT_MSTSLV_1000_HD_LBN 10
#define C22EXT_MSTSLV_1000_HD_WIDTH 1
#define C22EXT_MSTSLV_1000_FD_LBN 11
#define C22EXT_MSTSLV_1000_FD_WIDTH 1
#define C22EXT_MSTSLV_CTRL 49161
#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN 8
#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN 9
#define C22EXT_MSTSLV_STATUS 49162
#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN 10
#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN 11
/* Time to wait between powering down the LNPGA and turning off the power
* rails */
#define LNPGA_PDOWN_WAIT (HZ / 5)
static int crc_error_reset_threshold = 100;
module_param(crc_error_reset_threshold, int, 0644);
MODULE_PARM_DESC(crc_error_reset_threshold,
"Max number of CRC errors before XAUI reset");
struct tenxpress_phy_data {
enum efx_loopback_mode loopback_mode;
atomic_t bad_crc_count;
enum efx_phy_mode phy_mode;
int bad_lp_tries;
};
void tenxpress_crc_err(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
if (phy_data != NULL)
atomic_inc(&phy_data->bad_crc_count);
}
static ssize_t show_phy_short_reach(struct device *dev,
struct device_attribute *attr, char *buf)
{
@ -285,7 +276,9 @@ static int tenxpress_init(struct efx_nic *efx)
PMA_PMD_XCONTROL_REG);
reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
(1 << PMA_PMD_EXT_CLK_OUT_LBN) |
(1 << PMA_PMD_EXT_CLK312_LBN));
(1 << PMA_PMD_EXT_CLK312_LBN) |
(1 << PMA_PMD_EXT_ROBUST_LBN));
mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_XCONTROL_REG, reg);
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@ -347,6 +340,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
rc = tenxpress_init(efx);
if (rc < 0)
goto fail;
mdio_clause45_set_pause(efx);
if (efx->phy_type == PHY_TYPE_SFT9001B) {
rc = device_create_file(&efx->pci_dev->dev,
@ -377,8 +371,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
* a special software reset can glitch the XGMAC sufficiently for stats
* requests to fail. Since we don't often special_reset, just lock. */
spin_lock(&efx->stats_lock);
* requests to fail. */
efx_stats_disable(efx);
/* Initiate reset */
reg = mdio_clause45_read(efx, efx->mii.phy_id,
@ -393,17 +387,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
rc = mdio_clause45_wait_reset_mmds(efx,
TENXPRESS_REQUIRED_DEVS);
if (rc < 0)
goto unlock;
goto out;
/* Try and reconfigure the device */
rc = tenxpress_init(efx);
if (rc < 0)
goto unlock;
goto out;
/* Wait for the XGXS state machine to churn */
mdelay(10);
unlock:
spin_unlock(&efx->stats_lock);
out:
efx_stats_enable(efx);
return rc;
}
@ -521,7 +515,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
{
struct tenxpress_phy_data *phy_data = efx->phy_data;
struct ethtool_cmd ecmd;
bool phy_mode_change, loop_reset, loop_toggle, loopback;
bool phy_mode_change, loop_reset;
if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
phy_data->phy_mode = efx->phy_mode;
@ -532,12 +526,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
phy_data->phy_mode != PHY_MODE_NORMAL);
loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
if (loop_reset || loop_toggle || loopback || phy_mode_change) {
if (loop_reset || phy_mode_change) {
int rc;
efx->phy_op->get_settings(efx, &ecmd);
@ -552,20 +544,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
falcon_reset_xaui(efx);
}
if (efx->phy_type != PHY_TYPE_SFX7101) {
/* Only change autoneg once, on coming out or
* going into loopback */
if (loop_toggle)
ecmd.autoneg = !loopback;
if (loopback) {
ecmd.duplex = DUPLEX_FULL;
if (efx->loopback_mode == LOOPBACK_GPHY)
ecmd.speed = SPEED_1000;
else
ecmd.speed = SPEED_10000;
}
}
rc = efx->phy_op->set_settings(efx, &ecmd);
WARN_ON(rc);
}
@ -624,13 +602,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
if (phy_data->phy_mode != PHY_MODE_NORMAL)
return;
if (EFX_WORKAROUND_10750(efx) &&
atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
falcon_reset_xaui(efx);
atomic_set(&phy_data->bad_crc_count, 0);
}
}
static void tenxpress_phy_fini(struct efx_nic *efx)
@ -773,107 +744,76 @@ reset:
return rc;
}
static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
static void
tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
int phy = efx->mii.phy_id;
u32 lpa = 0;
int phy_id = efx->mii.phy_id;
u32 adv = 0, lpa = 0;
int reg;
if (efx->phy_type != PHY_TYPE_SFX7101) {
reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_REG);
if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_CTRL);
if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
adv |= ADVERTISED_1000baseT_Full;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
C22EXT_MSTSLV_STATUS);
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
lpa |= ADVERTISED_1000baseT_Half;
if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
lpa |= ADVERTISED_1000baseT_Full;
}
reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL);
if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
adv |= ADVERTISED_10000baseT_Full;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_STATUS);
if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
lpa |= ADVERTISED_10000baseT_Full;
return lpa;
}
static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
tenxpress_get_xnp_lpa(efx));
ecmd->supported |= SUPPORTED_10000baseT_Full;
ecmd->advertising |= ADVERTISED_10000baseT_Full;
}
mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
int phy_id = efx->mii.phy_id;
u32 xnp_adv = 0;
int reg;
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG);
if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
xnp_adv |= ADVERTISED_100baseT_Full;
if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
xnp_adv |= ADVERTISED_1000baseT_Full;
if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
xnp_adv |= ADVERTISED_10000baseT_Full;
mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
tenxpress_get_xnp_lpa(efx));
ecmd->supported |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
if (efx->phy_type != PHY_TYPE_SFX7101)
ecmd->supported |= (SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full);
/* Use the vendor defined C22ext register for duplex settings */
if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
GPHY_XCONTROL_REG);
ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
DUPLEX_FULL : DUPLEX_HALF);
}
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (efx->loopback_mode == LOOPBACK_GPHY)
ecmd->speed = SPEED_1000;
else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
ecmd->speed = SPEED_10000;
}
static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
if (!ecmd->autoneg)
return -EINVAL;
return mdio_clause45_set_settings(efx, ecmd);
}
static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
advertising & ADVERTISED_10000baseT_Full);
}
static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
{
int phy_id = efx->mii.phy_id;
int rc;
rc = mdio_clause45_set_settings(efx, ecmd);
if (rc)
return rc;
if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
ecmd->duplex == DUPLEX_FULL);
return rc;
}
static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
{
int phy = efx->mii.phy_id;
int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG);
bool enabled;
reg &= ~((1 << 2) | (1 << 3));
if (EFX_WORKAROUND_13204(efx) &&
(advertising & ADVERTISED_100baseT_Full))
reg |= 1 << PMA_PMD_100TX_ADV_LBN;
if (advertising & ADVERTISED_1000baseT_Full)
reg |= 1 << PMA_PMD_1000T_ADV_LBN;
if (advertising & ADVERTISED_10000baseT_Full)
reg |= 1 << PMA_PMD_10000T_ADV_LBN;
mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
PMA_PMD_SPEED_ENABLE_REG, reg);
enabled = (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full |
ADVERTISED_10000baseT_Full));
if (EFX_WORKAROUND_13204(efx))
enabled |= (advertising & ADVERTISED_100baseT_Full);
return enabled;
C22EXT_MSTSLV_CTRL,
C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
advertising & ADVERTISED_1000baseT_Full);
mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
MDIO_AN_10GBT_CTRL,
MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
advertising & ADVERTISED_10000baseT_Full);
}
struct efx_phy_operations falcon_sfx7101_phy_ops = {
@ -883,8 +823,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
.get_settings = sfx7101_get_settings,
.set_settings = mdio_clause45_set_settings,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
.num_tests = ARRAY_SIZE(sfx7101_test_names),
.test_names = sfx7101_test_names,
.run_tests = sfx7101_run_tests,
@ -899,9 +840,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
.poll = tenxpress_phy_poll,
.fini = tenxpress_phy_fini,
.clear_interrupt = efx_port_dummy_op_void,
.get_settings = sft9001_get_settings,
.set_settings = sft9001_set_settings,
.set_xnp_advertise = sft9001_set_xnp_advertise,
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
.num_tests = ARRAY_SIZE(sft9001_test_names),
.test_names = sft9001_test_names,
.run_tests = sft9001_run_tests,


@ -18,8 +18,8 @@
#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
(efx)->phy_type == PHY_TYPE_SFT9001B)
/* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@ -29,8 +29,6 @@
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
/* TX pkt parser problem with <= 16 byte TXes */
#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
/* Low rate CRC errors require XAUI reset */
#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
* or a PCIe error (bug 11028) */
#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@ -55,8 +53,8 @@
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
/* Need to send XNP pages for 100BaseT */
#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
/* Need to keep AN enabled */
#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
/* Don't restart AN in near-side loopback */
#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
#endif /* EFX_WORKAROUNDS_H */


@ -1003,9 +1003,9 @@ static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SKFP_CLR_STATS: /* Zero out the driver statistics */
if (!capable(CAP_NET_ADMIN)) {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
} else {
status = -EPERM;
} else {
memset(&lp->MacStat, 0, sizeof(lp->MacStat));
}
break;
default:


@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev)
}
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
netif_carrier_off(dev);
/* must be power of 2 */
@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev)
sky2_write32(hw, B0_IMSK, imask);
sky2_set_multicast(dev);
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
return 0;
err_out:


@ -954,7 +954,7 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
do {
udelay(1);
val = smsc911x_reg_read(pdata, RX_DP_CTRL);
} while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
} while (--timeout && (val & RX_DP_CTRL_RX_FFWD_));
if (unlikely(timeout == 0))
SMSC_WARNING(HW, "Timed out waiting for "


@ -1378,6 +1378,7 @@ static int smsc9420_open(struct net_device *dev)
/* test the IRQ connection to the ISR */
smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);
pd->software_irq_signal = false;
spin_lock_irqsave(&pd->int_lock, flags);
/* configure interrupt deassertion timer and enable interrupts */
@ -1393,8 +1394,6 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_pci_flush_write(pd);
timeout = 1000;
pd->software_irq_signal = false;
smp_wmb();
while (timeout--) {
if (pd->software_irq_signal)
break;


@ -9,6 +9,11 @@
Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
for more information on this driver.
DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
Hardware Reference Manual" is currently available at :
http://developer.intel.com/design/network/manuals/278074.htm
Please submit bugs to http://bugzilla.kernel.org/ .
*/
@ -32,7 +37,11 @@ void t21142_media_task(struct work_struct *work)
int csr12 = ioread32(ioaddr + CSR12);
int next_tick = 60*HZ;
int new_csr6 = 0;
int csr14 = ioread32(ioaddr + CSR14);
/* CSR12[LS10,LS100] are not reliable during autonegotiation */
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 2)
printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
dev->name, csr12, medianame[dev->if_port]);
@ -76,7 +85,7 @@ void t21142_media_task(struct work_struct *work)
new_csr6 = 0x83860000;
dev->if_port = 3;
iowrite32(0, ioaddr + CSR13);
iowrite32(0x0003FF7F, ioaddr + CSR14);
iowrite32(0x0003FFFF, ioaddr + CSR14);
iowrite16(8, ioaddr + CSR15);
iowrite32(1, ioaddr + CSR13);
}
@ -132,10 +141,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
int csr12 = ioread32(ioaddr + CSR12);
int csr14 = ioread32(ioaddr + CSR14);
/* CSR12[LS10,LS100] are not reliable during autonegotiation */
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 1)
printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
"%8.8x.\n", dev->name, csr12, csr5, ioread32(ioaddr + CSR14));
"%8.8x.\n", dev->name, csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@ -143,7 +156,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
int negotiated = tp->sym_advertise & (csr12 >> 16);
tp->lpar = csr12 >> 16;
tp->nwayset = 1;
if (negotiated & 0x0100) dev->if_port = 5;
/* If partner cannot negotiate, it is 10Mbps Half Duplex */
if (!(csr12 & 0x8000)) dev->if_port = 0;
else if (negotiated & 0x0100) dev->if_port = 5;
else if (negotiated & 0x0080) dev->if_port = 3;
else if (negotiated & 0x0040) dev->if_port = 4;
else if (negotiated & 0x0020) dev->if_port = 0;
@ -214,7 +229,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
tp->timer.expires = RUN_AT(3*HZ);
add_timer(&tp->timer);
} else if (dev->if_port == 5)
iowrite32(ioread32(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",


@ -1536,6 +1536,11 @@ static void adjust_link(struct net_device *dev)
static int init_phy(struct net_device *dev)
{
struct ucc_geth_private *priv = netdev_priv(dev);
struct device_node *np = priv->node;
struct device_node *phy, *mdio;
const phandle *ph;
char bus_name[MII_BUS_ID_SIZE];
const unsigned int *id;
struct phy_device *phydev;
char phy_id[BUS_ID_SIZE];
@ -1543,8 +1548,18 @@ static int init_phy(struct net_device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
priv->ug_info->phy_address);
ph = of_get_property(np, "phy-handle", NULL);
phy = of_find_node_by_phandle(*ph);
mdio = of_get_parent(phy);
id = of_get_property(phy, "reg", NULL);
of_node_put(phy);
of_node_put(mdio);
uec_mdio_bus_name(bus_name, mdio);
snprintf(phy_id, sizeof(phy_id), "%s:%02x",
bus_name, *id);
phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
@ -3748,6 +3763,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth->ug_info = ug_info;
ugeth->dev = dev;
ugeth->node = np;
return 0;
}


@ -1186,6 +1186,8 @@ struct ucc_geth_private {
int oldspeed;
int oldduplex;
int oldlink;
struct device_node *node;
};
void uec_set_ethtool_ops(struct net_device *netdev);


@ -156,7 +156,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
if (err)
goto reg_map_fail;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
uec_mdio_bus_name(new_bus->id, np);
new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
@ -283,3 +283,13 @@ void uec_mdio_exit(void)
{
of_unregister_platform_driver(&uec_mdio_driver);
}
void uec_mdio_bus_name(char *name, struct device_node *np)
{
const u32 *reg;
reg = of_get_property(np, "reg", NULL);
snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
}


@ -97,4 +97,5 @@ int uec_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
int uec_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
int __init uec_mdio_init(void);
void uec_mdio_exit(void);
void uec_mdio_bus_name(char *name, struct device_node *np);
#endif /* __UEC_MII_H */


@ -288,7 +288,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
skb_put(skb, MAX_PACKET_LEN);
hdr = skb_vnet_hdr(skb);
sg_init_one(sg, hdr, sizeof(*hdr));
sg_set_buf(sg, hdr, sizeof(*hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
@ -489,9 +489,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
sg_init_one(sg, mhdr, sizeof(*mhdr));
sg_set_buf(sg, mhdr, sizeof(*mhdr));
else
sg_init_one(sg, hdr, sizeof(*hdr));
sg_set_buf(sg, hdr, sizeof(*hdr));
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;


@ -234,20 +234,6 @@ struct dentry *debugfs_create_i2400m_reset(
&fops_i2400m_reset);
}
/*
* Debug levels control; see debug.h
*/
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(control),
D_SUBMODULE_DEFINE(driver),
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(fw),
D_SUBMODULE_DEFINE(netdev),
D_SUBMODULE_DEFINE(rfkill),
D_SUBMODULE_DEFINE(rx),
D_SUBMODULE_DEFINE(tx),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \


@ -707,6 +707,22 @@ void i2400m_release(struct i2400m *i2400m)
EXPORT_SYMBOL_GPL(i2400m_release);
/*
* Debug levels control; see debug.h
*/
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(control),
D_SUBMODULE_DEFINE(driver),
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(fw),
D_SUBMODULE_DEFINE(netdev),
D_SUBMODULE_DEFINE(rfkill),
D_SUBMODULE_DEFINE(rx),
D_SUBMODULE_DEFINE(tx),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
static
int __init i2400m_driver_init(void)
{


@ -1028,6 +1028,8 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* it's done by resetting the chip. To accomplish this we must
* first cleanup any pending DMA, then restart stuff after a la
* ath5k_init.
*
* Called with sc->lock.
*/
static int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
@ -2814,11 +2816,17 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath5k_softc *sc = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret;
mutex_lock(&sc->lock);
sc->bintval = conf->beacon_int;
sc->power_level = conf->power_level;
return ath5k_chan_set(sc, conf->channel);
ret = ath5k_chan_set(sc, conf->channel);
mutex_unlock(&sc->lock);
return ret;
}
static int


@ -1719,6 +1719,10 @@ static int iwl_read_ucode(struct iwl_priv *priv)
priv->ucode_data_backup.len = data_size;
iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
!priv->ucode_data_backup.v_addr)
goto err_pci_alloc;
/* Initialization instructions and data */
if (init_size && init_data_size) {
priv->ucode_init.len = init_size;


@ -285,7 +285,10 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
ofdm_power = priv->channels[channel - 1].hw_value >> 4;
cck_power = min(cck_power, (u8)11);
ofdm_power = min(ofdm_power, (u8)35);
if (ofdm_power > (u8)15)
ofdm_power = 25;
else
ofdm_power += 10;
rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
@ -536,7 +539,10 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
cck_power += priv->txpwr_base & 0xF;
cck_power = min(cck_power, (u8)35);
ofdm_power = min(ofdm_power, (u8)15);
if (ofdm_power > (u8)15)
ofdm_power = 25;
else
ofdm_power += 10;
ofdm_power += priv->txpwr_base >> 4;
ofdm_power = min(ofdm_power, (u8)35);


@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
* cannot be fixed without breaking all existing apps.
*/
case TUNSETIFF:
case TUNGETIFF:
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM)
COMPATIBLE_IOCTL(TUNSETDEBUG)
COMPATIBLE_IOCTL(TUNSETPERSIST)
COMPATIBLE_IOCTL(TUNSETOWNER)
COMPATIBLE_IOCTL(TUNSETLINK)
COMPATIBLE_IOCTL(TUNSETGROUP)
COMPATIBLE_IOCTL(TUNGETFEATURES)
COMPATIBLE_IOCTL(TUNSETOFFLOAD)
COMPATIBLE_IOCTL(TUNSETTXFILTER)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)


@ -183,7 +183,7 @@ static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
size = 2048;
if (nr_pcpus >= 32)
size = 4096;
if (sizeof(rwlock_t) != 0) {
if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
if (size * sizeof(spinlock_t) > PAGE_SIZE)
hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));


@ -2212,10 +2212,10 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return 0;
next_skb:
block_limit = skb_headlen(st->cur_skb);
block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
if (abs_offset < block_limit) {
*data = st->cur_skb->data + abs_offset;
*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
return block_limit - abs_offset;
}
@ -2250,13 +2250,14 @@ next_skb:
st->frag_data = NULL;
}
if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
st->frag_idx = 0;
goto next_skb;
} else if (st->root_skb == st->cur_skb &&
if (st->root_skb == st->cur_skb &&
skb_shinfo(st->root_skb)->frag_list) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
} else if (st->cur_skb->next) {
st->cur_skb = st->cur_skb->next;
st->frag_idx = 0;
goto next_skb;
}


@ -1268,6 +1268,9 @@ __be32 __init root_nfs_parse_addr(char *name)
static int __init ip_auto_config(void)
{
__be32 addr;
#ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
#endif
#ifdef CONFIG_PROC_FS
proc_net_fops_create(&init_net, "pnp", S_IRUGO, &pnp_seq_fops);
@ -1304,9 +1307,6 @@ static int __init ip_auto_config(void)
#endif
ic_first_dev->next) {
#ifdef IPCONFIG_DYNAMIC
int retries = CONF_OPEN_RETRIES;
if (ic_dynamic() < 0) {
ic_close_devs();


@ -524,7 +524,8 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
struct tcp_splice_state *tss = rd_desc->arg.data;
int ret;
ret = skb_splice_bits(skb, offset, tss->pipe, rd_desc->count, tss->flags);
ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
tss->flags);
if (ret > 0)
rd_desc->count -= ret;
return ret;


@ -120,8 +120,11 @@ EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define PORTS_PER_CHAIN (65536 / UDP_HTABLE_SIZE)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2))
@ -132,12 +135,17 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
sk_nulls_for_each(sk2, node, &hslot->head)
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
sk2->sk_hash == num &&
(bitmap || sk2->sk_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
|| sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2))
(*saddr_comp)(sk, sk2)) {
if (bitmap)
__set_bit(sk2->sk_hash / UDP_HTABLE_SIZE,
bitmap);
else
return 1;
}
return 0;
}
@ -160,32 +168,47 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (!snum) {
int low, high, remaining;
unsigned rand;
unsigned short first;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(&low, &high);
remaining = (high - low) + 1;
rand = net_random();
snum = first = rand % remaining + low;
rand |= 1;
for (;;) {
hslot = &udptable->hash[udp_hashfn(net, snum)];
first = (((u64)rand * remaining) >> 32) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * UDP_HTABLE_SIZE;
for (last = first + UDP_HTABLE_SIZE; first != last; first++) {
hslot = &udptable->hash[udp_hashfn(net, first)];
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
if (!udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
break;
spin_unlock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
saddr_comp);
snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
gives us randomization and full range coverage.
*/
do {
snum = snum + rand;
} while (snum < low || snum > high);
if (snum == first)
goto fail;
if (low <= snum && snum <= high &&
!test_bit(snum / UDP_HTABLE_SIZE, bitmap))
goto found;
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
}
goto fail;
} else {
hslot = &udptable->hash[udp_hashfn(net, snum)];
spin_lock_bh(&hslot->lock);
if (udp_lib_lport_inuse(net, snum, hslot, sk, saddr_comp))
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp))
goto fail_unlock;
}
found:
inet_sk(sk)->num = snum;
sk->sk_hash = snum;
if (sk_unhashed(sk)) {
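The randomised port walk above leans on a small arithmetic property: rand is
forced to be an odd multiple of UDP_HTABLE_SIZE, so every candidate snum in the
inner do/while falls into the same hash chain (one bitmap fill covers them
all), and the step is coprime to PORTS_PER_CHAIN, so the walk visits each port
of that chain exactly once before wrapping. A stand-alone sketch of just the
stepping, assuming a UDP_HTABLE_SIZE of 128 (illustrative, not kernel code):

#include <stdio.h>

#define UDP_HTABLE_SIZE  128                        /* assumed value for illustration */
#define PORTS_PER_CHAIN  (65536 / UDP_HTABLE_SIZE)  /* 512 */

int main(void)
{
	unsigned int rand = 0x12345678;             /* any pseudo-random value */
	unsigned short first, snum;
	unsigned int visited = 0;

	rand = (rand | 1) * UDP_HTABLE_SIZE;        /* odd multiple of the table size */
	first = snum = 40000;

	do {
		++visited;       /* snum % UDP_HTABLE_SIZE never changes, so every   */
		snum += rand;    /* candidate lands in the same hash chain (16-bit   */
	} while (snum != first); /* wrap-around of snum is intentional)              */

	/* Prints "512 of 512": the odd step is coprime to PORTS_PER_CHAIN, so
	 * the walk covers every port of the chain exactly once before repeating. */
	printf("%u of %u ports visited\n", visited, PORTS_PER_CHAIN);
	return 0;
}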


@ -4250,7 +4250,7 @@ static struct addrconf_sysctl_table
.procname = "mc_forwarding",
.data = &ipv6_devconf.mc_forwarding,
.maxlen = sizeof(int),
.mode = 0644,
.mode = 0444,
.proc_handler = proc_dointvec,
},
#endif


@ -443,10 +443,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
goto relookup_failed;
if (ip6_dst_lookup(sk, &dst2, &fl))
if (ip6_dst_lookup(sk, &dst2, &fl2))
goto relookup_failed;
err = xfrm_lookup(net, &dst2, &fl, sk, XFRM_LOOKUP_ICMP);
err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
switch (err) {
case 0:
dst_release(dst);


@ -255,6 +255,7 @@ int ip6_mc_input(struct sk_buff *skb)
* IPv6 multicast router mode is now supported ;)
*/
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
!(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
* Okay, we try to forward - split and duplicate
@ -316,7 +317,6 @@ int ip6_mc_input(struct sk_buff *skb)
}
if (skb2) {
skb2->dev = skb2->dst->dev;
ip6_mr_input(skb2);
}
}


@ -365,7 +365,9 @@ static int pim6_rcv(struct sk_buff *skb)
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
(pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
sizeof(*pim), IPPROTO_PIM,
csum_partial((void *)pim, sizeof(*pim), 0)) &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
@ -392,7 +394,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->dev = reg_dev;
skb->protocol = htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = 0;
skb->pkt_type = PACKET_HOST;
dst_release(skb->dst);
@ -481,6 +483,7 @@ static int mif6_delete(struct net *net, int vifi)
{
struct mif_device *v;
struct net_device *dev;
struct inet6_dev *in6_dev;
if (vifi < 0 || vifi >= net->ipv6.maxvif)
return -EADDRNOTAVAIL;
@ -513,6 +516,10 @@ static int mif6_delete(struct net *net, int vifi)
dev_set_allmulti(dev, -1);
in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding--;
if (v->flags & MIFF_REGISTER)
unregister_netdevice(dev);
@ -622,6 +629,7 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
int vifi = vifc->mif6c_mifi;
struct mif_device *v = &net->ipv6.vif6_table[vifi];
struct net_device *dev;
struct inet6_dev *in6_dev;
int err;
/* Is vif busy ? */
@ -662,6 +670,10 @@ static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
return -EINVAL;
}
in6_dev = __in6_dev_get(dev);
if (in6_dev)
in6_dev->cnf.mc_forwarding++;
/*
* Fill in the VIF structures
*/
@ -838,8 +850,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
skb->dst = dst_clone(pkt->dst);
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_pull(skb, sizeof(struct ipv6hdr));
}
if (net->ipv6.mroute6_sk == NULL) {
@ -1222,8 +1232,10 @@ static int ip6mr_sk_init(struct sock *sk)
rtnl_lock();
write_lock_bh(&mrt_lock);
if (likely(net->ipv6.mroute6_sk == NULL))
if (likely(net->ipv6.mroute6_sk == NULL)) {
net->ipv6.mroute6_sk = sk;
net->ipv6.devconf_all->mc_forwarding++;
}
else
err = -EADDRINUSE;
write_unlock_bh(&mrt_lock);
@ -1242,6 +1254,7 @@ int ip6mr_sk_done(struct sock *sk)
if (sk == net->ipv6.mroute6_sk) {
write_lock_bh(&mrt_lock);
net->ipv6.mroute6_sk = NULL;
net->ipv6.devconf_all->mc_forwarding--;
write_unlock_bh(&mrt_lock);
mroute_clean_tables(net);


@ -794,7 +794,7 @@ void ip6_route_input(struct sk_buff *skb)
.proto = iph->nexthdr,
};
if (rt6_need_strict(&iph->daddr))
if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
skb->dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input);


@ -77,6 +77,7 @@
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@ -175,6 +176,7 @@ struct packet_sock {
#endif
struct packet_type prot_hook;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
unsigned int running:1, /* prot_hook is attached*/
auxdata:1,
origdev:1;
@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
*/
spin_lock_init(&po->bind_lock);
mutex_init(&po->pg_vec_lock);
po->prot_hook.func = packet_rcv;
if (sock->type == SOCK_PACKET)
@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
synchronize_net();
err = -EBUSY;
mutex_lock(&po->pg_vec_lock);
if (closing || atomic_read(&po->mapped) == 0) {
err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
if (atomic_read(&po->mapped))
printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
spin_lock(&po->bind_lock);
if (was_running && !po->running) {
@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
size = vma->vm_end - vma->vm_start;
lock_sock(sk);
mutex_lock(&po->pg_vec_lock);
if (po->pg_vec == NULL)
goto out;
if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
err = 0;
out:
release_sock(sk);
mutex_unlock(&po->pg_vec_lock);
return err;
}
#endif


@ -28,17 +28,6 @@
#include "debug-levels.h"
/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \


@ -516,6 +516,19 @@ void wimax_dev_rm(struct wimax_dev *wimax_dev)
}
EXPORT_SYMBOL_GPL(wimax_dev_rm);
/* Debug framework control of debug levels */
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(debugfs),
D_SUBMODULE_DEFINE(id_table),
D_SUBMODULE_DEFINE(op_msg),
D_SUBMODULE_DEFINE(op_reset),
D_SUBMODULE_DEFINE(op_rfkill),
D_SUBMODULE_DEFINE(stack),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
struct genl_family wimax_gnl_family = {
.id = GENL_ID_GENERATE,
.name = "WiMAX",


@ -498,6 +498,7 @@ static struct ieee80211_regdomain *country_ie_2_rd(
* calculate the number of reg rules we will need. We will need one
* for each channel subband */
while (country_ie_len >= 3) {
int end_channel = 0;
struct ieee80211_country_ie_triplet *triplet =
(struct ieee80211_country_ie_triplet *) country_ie;
int cur_sub_max_channel = 0, cur_channel = 0;
@ -509,9 +510,25 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
/* 2 GHz */
if (triplet->chans.first_channel <= 14)
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
cur_channel = triplet->chans.first_channel;
cur_sub_max_channel = ieee80211_channel_to_frequency(
cur_channel + triplet->chans.num_channels);
cur_sub_max_channel = end_channel;
/* Basic sanity check */
if (cur_sub_max_channel < cur_channel)
@ -590,15 +607,6 @@ static struct ieee80211_regdomain *country_ie_2_rd(
end_channel = triplet->chans.first_channel +
triplet->chans.num_channels;
else
/*
* 5 GHz -- For example in country IEs if the first
* channel given is 36 and the number of channels is 4
* then the individual channel numbers defined for the
* 5 GHz PHY by these parameters are: 36, 40, 44, and 48
* and not 36, 37, 38, 39.
*
* See: http://tinyurl.com/11d-clarification
*/
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
@ -1276,7 +1284,7 @@ static void reg_country_ie_process_debug(
if (intersected_rd) {
printk(KERN_DEBUG "cfg80211: We intersect both of these "
"and get:\n");
print_regdomain_info(rd);
print_regdomain_info(intersected_rd);
return;
}
printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");