Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Don't use uninitialized data in IPVS, from Dan Carpenter.

 2) conntrack race fixes from Pablo Neira Ayuso.

 3) Fix TX hangs with i40e, from Jesse Brandeburg.

 4) Fix budget return from poll calls in dnet and alx, from Eric
    Dumazet.

 5) Fix bogus "if (unlikely(x) < 0)" test in AF_PACKET, from Christoph
    Jaeger.

 6) Fix bug introduced by conversion to list_head in TIPC retransmit
    code, from Jon Paul Maloy.

 7) Don't use GFP_NOIO under spinlock in USB kaweth driver, from Alexey
    Khoroshilov.

 8) Fix bridge build with INET disabled, from Arnd Bergmann.

 9) Fix netlink array overrun for PROBE attributes in openvswitch, from
    Thomas Graf.

10) Don't hold spinlock across synchronize_irq() in tg3 driver, from
    Prashant Sreedharan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  tg3: Release tp->lock before invoking synchronize_irq()
  tg3: tg3_reset_task() needs to use rtnl_lock to synchronize
  tg3: tg3_timer() should grab tp->lock before checking for tp->irq_sync
  team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin
  openvswitch: packet messages need their own probe attribute
  i40e: adds FCoE configure option
  cxgb4vf: Fix queue allocation for 40G adapter
  netdevice: Add missing parentheses in macro
  bridge: only provide proxy ARP when CONFIG_INET is enabled
  neighbour: fix base_reachable_time(_ms) not effective immediately when changed
  net: fec: fix MDIO bus assignment for dual fec SoC's
  xen-netfront: use different locks for Rx and Tx stats
  drivers: net: cpsw: fix multicast flush in dual emac mode
  cxgb4vf: Initialize mdio_addr before using it
  net: Corrected the comment describing the ndo operations to reflect the actual prototype for couple of operations
  usb/kaweth: use GFP_ATOMIC under spin_lock in usb_start_wait_urb()
  MAINTAINERS: add me as ibmveth maintainer
  tipc: fix bug in broadcast retransmit code
  update ip-sysctl.txt documentation (v2)
  net/at91_ether: prepare and unprepare clock
  ...
Linus Torvalds 2015-01-15 11:17:37 +13:00
commit a6391a924c
49 changed files with 420 additions and 176 deletions


@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
route/max_size - INTEGER route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes. this when using large numbers of interfaces and/or routes.
From linux kernel 3.6 onwards, this is deprecated for ipv4
as route cache is no longer used.
neigh/default/gc_thresh1 - INTEGER neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not Minimum number of entries to keep. Garbage collector will not


@ -4749,7 +4749,7 @@ S: Supported
F: drivers/scsi/ipr.* F: drivers/scsi/ipr.*
IBM Power Virtual Ethernet Device Driver IBM Power Virtual Ethernet Device Driver
M: Santiago Leon <santil@linux.vnet.ibm.com> M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
S: Supported S: Supported
F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmveth.*


@ -159,13 +159,28 @@
pinctrl-0 = <&pinctrl_enet1>; pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>; phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii"; phy-mode = "rgmii";
phy-handle = <&ethphy1>;
status = "okay"; status = "okay";
mdio {
#address-cells = <1>;
#size-cells = <0>;
ethphy1: ethernet-phy@0 {
reg = <0>;
};
ethphy2: ethernet-phy@1 {
reg = <1>;
};
};
}; };
&fec2 { &fec2 {
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>; pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii"; phy-mode = "rgmii";
phy-handle = <&ethphy2>;
status = "okay"; status = "okay";
}; };


@ -129,13 +129,28 @@
&fec0 { &fec0 {
phy-mode = "rmii"; phy-mode = "rmii";
phy-handle = <&ethphy0>;
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec0>; pinctrl-0 = <&pinctrl_fec0>;
status = "okay"; status = "okay";
mdio {
#address-cells = <1>;
#size-cells = <0>;
ethphy0: ethernet-phy@0 {
reg = <0>;
};
ethphy1: ethernet-phy@1 {
reg = <1>;
};
};
}; };
&fec1 { &fec1 {
phy-mode = "rmii"; phy-mode = "rmii";
phy-handle = <&ethphy1>;
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>; pinctrl-0 = <&pinctrl_fec1>;
status = "okay"; status = "okay";


@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
byte force_mt_info = false; byte force_mt_info = false;
byte dir; byte dir;
dword d; dword d;


@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk); schedule_work(&alx->reset_wk);
} }
static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{ {
struct alx_rx_queue *rxq = &alx->rxq; struct alx_rx_queue *rxq = &alx->rxq;
struct alx_rrd *rrd; struct alx_rrd *rrd;
struct alx_buffer *rxb; struct alx_buffer *rxb;
struct sk_buff *skb; struct sk_buff *skb;
u16 length, rfd_cleaned = 0; u16 length, rfd_cleaned = 0;
int work = 0;
while (budget > 0) { while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx]; rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
break; break;
@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
ALX_GET_FIELD(le32_to_cpu(rrd->word0), ALX_GET_FIELD(le32_to_cpu(rrd->word0),
RRD_NOR) != 1) { RRD_NOR) != 1) {
alx_schedule_reset(alx); alx_schedule_reset(alx);
return 0; return work;
} }
rxb = &rxq->bufs[rxq->read_idx]; rxb = &rxq->bufs[rxq->read_idx];
@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
} }
napi_gro_receive(&alx->napi, skb); napi_gro_receive(&alx->napi, skb);
budget--; work++;
next_pkt: next_pkt:
if (++rxq->read_idx == alx->rx_ringsz) if (++rxq->read_idx == alx->rx_ringsz)
@ -258,21 +259,22 @@ next_pkt:
if (rfd_cleaned) if (rfd_cleaned)
alx_refill_rx_ring(alx, GFP_ATOMIC); alx_refill_rx_ring(alx, GFP_ATOMIC);
return budget > 0; return work;
} }
static int alx_poll(struct napi_struct *napi, int budget) static int alx_poll(struct napi_struct *napi, int budget)
{ {
struct alx_priv *alx = container_of(napi, struct alx_priv, napi); struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
struct alx_hw *hw = &alx->hw; struct alx_hw *hw = &alx->hw;
bool complete = true;
unsigned long flags; unsigned long flags;
bool tx_complete;
int work;
complete = alx_clean_tx_irq(alx) && tx_complete = alx_clean_tx_irq(alx);
alx_clean_rx_irq(alx, budget); work = alx_clean_rx_irq(alx, budget);
if (!complete) if (!tx_complete || work == budget)
return 1; return budget;
napi_complete(&alx->napi); napi_complete(&alx->napi);
@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
alx_post_write(hw); alx_post_write(hw);
return 0; return work;
} }
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
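
For context on the alx poll change above (and the similar dnet one further down):
a NAPI poll handler must return the number of packets it actually processed, and
only a return value equal to the full budget tells the core to keep polling. The
sketch below is a minimal userspace model of that contract with made-up names;
it is not driver code.

/* Minimal userspace model of the NAPI poll contract restored by the
 * alx/dnet fixes: return the work done; a full budget means "poll me
 * again", anything less means "done, re-enable interrupts".
 * All names here are hypothetical.
 */
#include <stdio.h>

static int pending_pkts = 10;                 /* pretend RX backlog */

static int model_poll(int budget)
{
    int work = 0;

    while (work < budget && pending_pkts > 0) {
        pending_pkts--;                       /* "receive" one packet */
        work++;
    }
    return work;                              /* never more than budget */
}

int main(void)
{
    int budget = 4, work;

    do {
        work = model_poll(budget);
        printf("polled %d packet(s)\n", work);
    } while (work == budget);                 /* re-poll only on full budget */

    printf("backlog drained, interrupts re-enabled\n");
    return 0;
}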


@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
} }
static void tg3_irq_quiesce(struct tg3 *tp) static void tg3_irq_quiesce(struct tg3 *tp)
__releases(tp->lock)
__acquires(tp->lock)
{ {
int i; int i;
@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
tp->irq_sync = 1; tp->irq_sync = 1;
smp_mb(); smp_mb();
spin_unlock_bh(&tp->lock);
for (i = 0; i < tp->irq_cnt; i++) for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec); synchronize_irq(tp->napi[i].irq_vec);
spin_lock_bh(&tp->lock);
} }
/* Fully shutdown all tg3 driver activity elsewhere in the system. /* Fully shutdown all tg3 driver activity elsewhere in the system.
@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
/* tp->lock is held. */ /* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp) static int tg3_chip_reset(struct tg3 *tp)
__releases(tp->lock)
__acquires(tp->lock)
{ {
u32 val; u32 val;
void (*write_op)(struct tg3 *, u32, u32); void (*write_op)(struct tg3 *, u32, u32);
@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
} }
smp_mb(); smp_mb();
tg3_full_unlock(tp);
for (i = 0; i < tp->irq_cnt; i++) for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec); synchronize_irq(tp->napi[i].irq_vec);
tg3_full_lock(tp, 0);
if (tg3_asic_rev(tp) == ASIC_REV_57780) { if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
{ {
struct tg3 *tp = (struct tg3 *) __opaque; struct tg3 *tp = (struct tg3 *) __opaque;
if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
goto restart_timer;
spin_lock(&tp->lock); spin_lock(&tp->lock);
if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
spin_unlock(&tp->lock);
goto restart_timer;
}
if (tg3_asic_rev(tp) == ASIC_REV_5717 || if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS)) tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp); tg3_chk_missed_msi(tp);
@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
struct tg3 *tp = container_of(work, struct tg3, reset_task); struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err; int err;
rtnl_lock();
tg3_full_lock(tp, 0); tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) { if (!netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp); tg3_full_unlock(tp);
rtnl_unlock();
return; return;
} }
@ -11138,6 +11154,7 @@ out:
tg3_phy_start(tp); tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_flag_clear(tp, RESET_TASK_PENDING);
rtnl_unlock();
} }
static int tg3_request_irq(struct tg3 *tp, int irq_num) static int tg3_request_irq(struct tg3 *tp, int irq_num)
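
The tg3 changes above all follow one rule: never wait for another context (here,
the IRQ handlers via synchronize_irq()) while holding a lock that context may
itself need. Below is a rough userspace model of that quiesce pattern using
pthreads as a stand-in; the names are invented and this is not the driver's
actual locking.

/* Userspace model of the quiesce pattern: set the "back off" flag under
 * the lock, DROP the lock, wait for the other context, then re-take it.
 * pthreads stand in for tp->lock and synchronize_irq(); illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_sync;                    /* tells the "handler" to back off */

static void *irq_handler(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);          /* the handler needs the lock too */
    if (!irq_sync)
        puts("handler: doing normal work");
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t irq;

    pthread_mutex_lock(&lock);
    irq_sync = 1;                       /* quiesce under the lock */
    pthread_mutex_unlock(&lock);        /* drop it *before* waiting */

    pthread_create(&irq, NULL, irq_handler, NULL);
    pthread_join(&irq, NULL);           /* "synchronize_irq()": wait it out */

    pthread_mutex_lock(&lock);          /* safe to re-take the lock now */
    puts("main: quiesced, continuing under the lock");
    pthread_mutex_unlock(&lock);
    return 0;
}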


@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
res = PTR_ERR(lp->pclk); res = PTR_ERR(lp->pclk);
goto err_free_dev; goto err_free_dev;
} }
clk_enable(lp->pclk); clk_prepare_enable(lp->pclk);
lp->hclk = ERR_PTR(-ENOENT); lp->hclk = ERR_PTR(-ENOENT);
lp->tx_clk = ERR_PTR(-ENOENT); lp->tx_clk = ERR_PTR(-ENOENT);
@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
err_out_unregister_netdev: err_out_unregister_netdev:
unregister_netdev(dev); unregister_netdev(dev);
err_disable_clock: err_disable_clock:
clk_disable(lp->pclk); clk_disable_unprepare(lp->pclk);
err_free_dev: err_free_dev:
free_netdev(dev); free_netdev(dev);
return res; return res;
@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
kfree(lp->mii_bus->irq); kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus); mdiobus_free(lp->mii_bus);
unregister_netdev(dev); unregister_netdev(dev);
clk_disable(lp->pclk); clk_disable_unprepare(lp->pclk);
free_netdev(dev); free_netdev(dev);
return 0; return 0;
@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
netif_stop_queue(net_dev); netif_stop_queue(net_dev);
netif_device_detach(net_dev); netif_device_detach(net_dev);
clk_disable(lp->pclk); clk_disable_unprepare(lp->pclk);
} }
return 0; return 0;
} }
@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
struct macb *lp = netdev_priv(net_dev); struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) { if (netif_running(net_dev)) {
clk_enable(lp->pclk); clk_prepare_enable(lp->pclk);
netif_device_attach(net_dev); netif_device_attach(net_dev);
netif_start_queue(net_dev); netif_start_queue(net_dev);


@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
*/ */
n10g = 0; n10g = 0;
for_each_port(adapter, pidx) for_each_port(adapter, pidx)
n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
/* /*
* We default to 1 queue per non-10G port and up to # of cores queues * We default to 1 queue per non-10G port and up to # of cores queues


@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
return v; return v;
v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
FW_PORT_CMD_MDIOADDR_G(v) : -1;
pi->port_type = FW_PORT_CMD_PTYPE_G(v); pi->port_type = FW_PORT_CMD_PTYPE_G(v);
pi->mod_type = FW_PORT_MOD_TYPE_NA; pi->mod_type = FW_PORT_MOD_TYPE_NA;


@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* break out of while loop if there are no more * break out of while loop if there are no more
* packets waiting * packets waiting
*/ */
if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
napi_complete(napi); break;
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
return 0;
}
cmd_word = dnet_readl(bp, RX_LEN_FIFO); cmd_word = dnet_readl(bp, RX_LEN_FIFO);
pkt_len = cmd_word & 0xFFFF; pkt_len = cmd_word & 0xFFFF;
@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
"size %u.\n", dev->name, pkt_len); "size %u.\n", dev->name, pkt_len);
} }
budget -= npackets;
if (npackets < budget) { if (npackets < budget) {
/* We processed all packets available. Tell NAPI it can /* We processed all packets available. Tell NAPI it can
* stop polling then re-enable rx interrupts */ * stop polling then re-enable rx interrupts.
*/
napi_complete(napi); napi_complete(napi);
int_enable = dnet_readl(bp, INTR_ENB); int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB); dnet_writel(bp, int_enable, INTR_ENB);
return 0;
} }
/* There are still packets waiting */ return npackets;
return 1;
} }
static irqreturn_t dnet_interrupt(int irq, void *dev_id) static irqreturn_t dnet_interrupt(int irq, void *dev_id)


@ -424,6 +424,8 @@ struct bufdesc_ex {
* (40ns * 6). * (40ns * 6).
*/ */
#define FEC_QUIRK_BUG_CAPTURE (1 << 10) #define FEC_QUIRK_BUG_CAPTURE (1 << 10)
/* Controller has only one MDIO bus */
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
struct fec_enet_priv_tx_q { struct fec_enet_priv_tx_q {
int index; int index;


@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0, .driver_data = 0,
}, { }, {
.name = "imx28-fec", .name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO,
}, { }, {
.name = "imx6q-fec", .name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO, i; int err = -ENXIO, i;
/* /*
* The dual fec interfaces are not equivalent with enet-mac. * The i.MX28 dual fec interfaces are not equal.
* Here are the differences: * Here are the differences:
* *
* - fec0 supports MII & RMII modes while fec1 only supports RMII * - fec0 supports MII & RMII modes while fec1 only supports RMII
@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by * mdio interface in board design, and need to be configured by
* fec0 mii_bus. * fec0 mii_bus.
*/ */
if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */ /* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) { if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus; fep->mii_bus = fec0_mii_bus;
@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++; mii_cnt++;
/* save fec0 mii_bus */ /* save fec0 mii_bus */
if (fep->quirks & FEC_QUIRK_ENET_MAC) if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
fec0_mii_bus = fep->mii_bus; fec0_mii_bus = fep->mii_bus;
return 0; return 0;
@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data; pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data; fep->quirks = pdev->id_entry->driver_data;
fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs; fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs; fep->num_tx_queues = num_tx_qs;


@ -281,6 +281,17 @@ config I40E_DCB
If unsure, say N. If unsure, say N.
config I40E_FCOE
bool "Fibre Channel over Ethernet (FCoE)"
default n
depends on I40E && DCB && FCOE
---help---
Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
in the driver. This will create new netdev for exclusive FCoE
use with XL710 FCoE offloads enabled.
If unsure, say N.
config I40EVF config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI depends on PCI_MSI


@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o


@ -78,7 +78,7 @@ do { \
} while (0) } while (0)
typedef enum i40e_status_code i40e_status; typedef enum i40e_status_code i40e_status;
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) #ifdef CONFIG_I40E_FCOE
#define I40E_FCOE #define I40E_FCOE
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ #endif
#endif /* _I40E_OSDEP_H_ */ #endif /* _I40E_OSDEP_H_ */


@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head); return le32_to_cpu(*(volatile __le32 *)head);
} }
#define WB_STRIDE 0x3
/** /**
* i40e_clean_tx_irq - Reclaim resources after transmit completes * i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean * @tx_ring: tx ring to clean
@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets; tx_ring->q_vector->tx.total_packets += total_packets;
/* check to see if there are any non-cache aligned descriptors
* waiting to be written back, and kick the hardware to force
* them to be written back in case of napi polling
*/
if (budget &&
!((i & WB_STRIDE) == WB_STRIDE) &&
!test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
else
tx_ring->arm_wb = false;
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */ /* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
dev_info(tx_ring->dev, dev_info(tx_ring->dev,
"tx hang detected on queue %d, resetting adapter\n", "tx hang detected on queue %d, reset requested\n",
tx_ring->queue_index); tx_ring->queue_index);
tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); /* do not fire the reset immediately, wait for the stack to
* decide we are truly stuck, also prevents every queue from
* simultaneously requesting a reset
*/
/* the adapter is about to reset, no point in enabling stuff */ /* the adapter is about to reset, no point in enabling polling */
return true; budget = 1;
} }
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
} }
} }
return budget > 0; return !!budget;
}
/**
* i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
* @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback
*
**/
static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
/* allow 00 to be written to the index */;
wr32(&vsi->back->hw,
I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
val);
} }
/** /**
@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes * so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* bet set if the *inner* header is UDP * The UDP_0 bit *may* bet set if the *inner* header is UDP
*/ */
if (ipv4_tunnel && if (ipv4_tunnel) {
(decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
skb->transport_header = skb->mac_header + skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) + sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4); (ip_hdr(skb)->ihl * 4);
@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD)) skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0; ? VLAN_HLEN : 0;
rx_udp_csum = udp_csum(skb); if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
iph = ip_hdr(skb); (udp_hdr(skb)->check != 0)) {
csum = csum_tcpudp_magic( rx_udp_csum = udp_csum(skb);
iph->saddr, iph->daddr, iph = ip_hdr(skb);
(skb->len - skb_transport_offset(skb)), csum = csum_tcpudp_magic(
IPPROTO_UDP, rx_udp_csum); iph->saddr, iph->daddr,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
if (udp_hdr(skb)->check != csum) if (udp_hdr(skb)->check != csum)
goto checksum_fail; goto checksum_fail;
} /* else its GRE and so no outer UDP header */
} }
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi; struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring; struct i40e_ring *ring;
bool clean_complete = true; bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring; int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) { if (test_bit(__I40E_DOWN, &vsi->state)) {
@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger /* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors. * budget and be more aggressive about cleaning up the Tx descriptors.
*/ */
i40e_for_each_ring(ring, q_vector->tx) i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
arm_wb |= ring->arm_wb;
}
/* We attempt to distribute budget to each Rx queue fairly, but don't /* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early. * allow the budget to go below 1 because that would exit polling early.
@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */ /* If work not completed, return budget and polling will return */
if (!clean_complete) if (!clean_complete) {
if (arm_wb)
i40e_force_wb(vsi, q_vector);
return budget; return budget;
}
/* Work is done so exit the polling mode and re-enable the interrupt */ /* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi); napi_complete(napi);
@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0) if (err < 0)
return err; return err;
if (protocol == htons(ETH_P_IP)) { iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0; iph->tot_len = 0;
iph->check = 0; iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0); 0, IPPROTO_TCP, 0);
} else if (skb_is_gso_v6(skb)) { } else if (ipv6h->version == 6) {
ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
: ipv6_hdr(skb);
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0; ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
} }
} else if (tx_flags & I40E_TX_FLAGS_IPV6) { } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
if (tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0; ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} }
/* Now set the ctx descriptor fields */ /* Now set the ctx descriptor fields */
@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) - ((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) << skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT; I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
tx_flags &= ~I40E_TX_FLAGS_IPV4;
tx_flags |= I40E_TX_FLAGS_IPV6;
}
} else { } else {
network_hdr_len = skb_network_header_len(skb); network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb); this_ip_hdr = ip_hdr(skb);
@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Place RS bit on last descriptor of any packet that spans across the /* Place RS bit on last descriptor of any packet that spans across the
* 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
*/ */
#define WB_STRIDE 0x3
if (((i & WB_STRIDE) != WB_STRIDE) && if (((i & WB_STRIDE) != WB_STRIDE) &&
(first <= &tx_ring->tx_bi[i]) && (first <= &tx_ring->tx_bi[i]) &&
(first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
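
A quick worked example of the WB_STRIDE test used in the i40e change above:
with WB_STRIDE == 0x3 and 16-byte Tx descriptors, (i & WB_STRIDE) == WB_STRIDE
holds only for indexes 3, 7, 11, ..., i.e. the last descriptor of each 64-byte
cacheline. Plain C just to show the arithmetic, not driver code.

/* Shows which descriptor indexes satisfy (i & WB_STRIDE) == WB_STRIDE;
 * those are the ones that naturally get a write-back / RS bit.
 */
#include <stdio.h>

#define WB_STRIDE 0x3

int main(void)
{
    for (unsigned int i = 0; i < 16; i++)
        printf("desc %2u: %s\n", i,
               (i & WB_STRIDE) == WB_STRIDE ?
               "last in its 64B cacheline" : "-");
    return 0;
}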


@ -241,6 +241,7 @@ struct i40e_ring {
unsigned long last_rx_timestamp; unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */ bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */
/* stats structs */ /* stats structs */
struct i40e_queue_stats stats; struct i40e_queue_stats stats;


@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI, EESR_ECI,
.fdr_value = 0x00000f0f,
.apr = 1, .apr = 1,
.mpr = 1, .mpr = 1,
@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI, EESR_ECI,
.fdr_value = 0x00000f0f,
.apr = 1, .apr = 1,
.mpr = 1, .mpr = 1,
@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = {
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI, EESR_ECI,
.trscer_err_mask = DESC_I_RINT8,
.apr = 1, .apr = 1,
.mpr = 1, .mpr = 1,
.tpauser = 1, .tpauser = 1,
@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->eesr_err_check) if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
if (!cd->trscer_err_mask)
cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
} }
static int sh_eth_check_reset(struct net_device *ndev) static int sh_eth_check_reset(struct net_device *ndev)
@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Frame recv control (enable multiple-packets per rx irq) */ /* Frame recv control (enable multiple-packets per rx irq) */
sh_eth_write(ndev, RMCR_RNC, RMCR); sh_eth_write(ndev, RMCR_RNC, RMCR);
sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
if (mdp->cd->bculr) if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */


@ -369,6 +369,8 @@ enum DESC_I_BIT {
DESC_I_RINT1 = 0x0001, DESC_I_RINT1 = 0x0001,
}; };
#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
/* RPADIR */ /* RPADIR */
enum RPADIR_BIT { enum RPADIR_BIT {
RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
unsigned long tx_check; unsigned long tx_check;
unsigned long eesr_err_check; unsigned long eesr_err_check;
/* Error mask */
unsigned long trscer_err_mask;
/* hardware features */ /* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */ unsigned long irq_flags; /* IRQ configuration flags */
unsigned no_psr:1; /* EtherC DO NOT have PSR */ unsigned no_psr:1; /* EtherC DO NOT have PSR */


@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */ /* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
priv->host_port); priv->host_port, -1);
/* Flood All Unicast Packets to Host port */ /* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev) static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{ {
struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_priv *priv = netdev_priv(ndev);
int vid;
if (priv->data.dual_emac)
vid = priv->slaves[priv->emac_port].port_vlan;
else
vid = priv->data.default_vlan;
if (ndev->flags & IFF_PROMISC) { if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */ /* Enable promiscuous mode */
@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */ /* Clear all mcast from ALE */
cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
vid);
if (!netdev_mc_empty(ndev)) { if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha; struct netdev_hw_addr *ha;


@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
} }
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
{ {
u32 ale_entry[ALE_ENTRY_WORDS]; u32 ale_entry[ALE_ENTRY_WORDS];
int ret, idx; int ret, idx;
@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
continue; continue;
/* if vid passed is -1 then remove all multicast entry from
* the table irrespective of vlan id, if a valid vlan id is
* passed then remove only multicast added to that vlan id.
* if vlan id doesn't match then move on to next entry.
*/
if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
continue;
if (cpsw_ale_get_mcast(ale_entry)) { if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6]; u8 addr[6];


@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid); int flags, u16 vid);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,


@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
static void team_notify_peers_work(struct work_struct *work) static void team_notify_peers_work(struct work_struct *work)
{ {
struct team *team; struct team *team;
int val;
team = container_of(work, struct team, notify_peers.dw.work); team = container_of(work, struct team, notify_peers.dw.work);
@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
schedule_delayed_work(&team->notify_peers.dw, 0); schedule_delayed_work(&team->notify_peers.dw, 0);
return; return;
} }
val = atomic_dec_if_positive(&team->notify_peers.count_pending);
if (val < 0) {
rtnl_unlock();
return;
}
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock(); rtnl_unlock();
if (!atomic_dec_and_test(&team->notify_peers.count_pending)) if (val)
schedule_delayed_work(&team->notify_peers.dw, schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval)); msecs_to_jiffies(team->notify_peers.interval));
} }
@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
static void team_mcast_rejoin_work(struct work_struct *work) static void team_mcast_rejoin_work(struct work_struct *work)
{ {
struct team *team; struct team *team;
int val;
team = container_of(work, struct team, mcast_rejoin.dw.work); team = container_of(work, struct team, mcast_rejoin.dw.work);
@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
schedule_delayed_work(&team->mcast_rejoin.dw, 0); schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return; return;
} }
val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
if (val < 0) {
rtnl_unlock();
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock(); rtnl_unlock();
if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) if (val)
schedule_delayed_work(&team->mcast_rejoin.dw, schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval)); msecs_to_jiffies(team->mcast_rejoin.interval));
} }
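
The team driver fix above swaps atomic_dec_and_test() for
atomic_dec_if_positive() so a counter that was already zero (e.g. notifications
disabled concurrently) can never be pushed negative. A rough userspace
equivalent using C11 atomics, with invented helper names:

/* Decrement only while positive: the compare-and-swap loop retries if
 * another thread changed the counter, and refuses to go below zero.
 * Not the kernel's atomic_dec_if_positive(), just the same idea.
 */
#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
    int old = atomic_load(v);

    while (old > 0 &&
           !atomic_compare_exchange_weak(v, &old, old - 1))
        ;                               /* old reloaded on CAS failure */
    return old > 0 ? old - 1 : -1;      /* < 0: nothing was pending */
}

int main(void)
{
    atomic_int count_pending = 2;
    int val;

    while ((val = dec_if_positive(&count_pending)) >= 0)
        printf("notify peers, %d round(s) still pending\n", val);

    printf("counter already zero, not rescheduling\n");
    return 0;
}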


@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
awd.done = 0; awd.done = 0;
urb->context = &awd; urb->context = &awd;
status = usb_submit_urb(urb, GFP_NOIO); status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) { if (status) {
// something went wrong // something went wrong
usb_free_urb(urb); usb_free_urb(urb);


@ -69,8 +69,8 @@
#include "iwl-agn-hw.h" #include "iwl-agn-hw.h"
/* Highest firmware API version supported */ /* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 10 #define IWL7260_UCODE_API_MAX 12
#define IWL3160_UCODE_API_MAX 10 #define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */ /* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10 #define IWL7260_UCODE_API_OK 10
@ -105,7 +105,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-" #define IWL7265D_FW_PRE "iwlwifi-7265D-"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0 #define NVM_HW_SECTION_NUM_FAMILY_7000 0


@ -69,7 +69,7 @@
#include "iwl-agn-hw.h" #include "iwl-agn-hw.h"
/* Highest firmware API version supported */ /* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 10 #define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */ /* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10 #define IWL8000_UCODE_API_OK 10


@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan. * longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
* regardless of the band or the number of the probes. FW will calculate
* the actual dwell time.
*/ */
enum iwl_ucode_tlv_api { enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
}; };
/** /**


@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests. * and DS parameter set IEs into probe requests.
* @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/ */
enum iwl_mvm_lmac_scan_flags { enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
}; };
enum iwl_scan_priority { enum iwl_scan_priority {


@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only * already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit. * req->n_ssids - 1 bits in addition to the first bit.
*/ */
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band, int n_ssids)
{ {
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
return 10;
if (band == IEEE80211_BAND_2GHZ) if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1); return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1); return 10 + 2 * (n_ssids + 1);
} }
static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
enum ieee80211_band band)
{ {
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10; return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
} }
@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/ */
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell = u32 passive_dwell =
iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); iwl_mvm_get_passive_dwell(mvm,
IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell; params->max_out_time = passive_dwell;
} else { } else {
params->passive_fragmented = true; params->passive_fragmented = true;
@ -348,8 +355,8 @@ not_bound:
params->dwell[band].passive = frag_passive_dwell; params->dwell[band].passive = frag_passive_dwell;
else else
params->dwell[band].passive = params->dwell[band].passive =
iwl_mvm_get_passive_dwell(band); iwl_mvm_get_passive_dwell(mvm, band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band, params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids); n_ssids);
} }
} }
@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req)) if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
else
flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
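
For reference, the dwell-time change earlier in this file's diff: without
IWL_UCODE_TLV_API_BASIC_DWELL the driver computes the active dwell per band and
SSID count (20 + 3*(n+1) ms on 2.4 GHz, 10 + 2*(n+1) ms on 5 GHz), while newer
firmware just takes a basic 10 ms active / 110 ms passive and works out the rest
itself. A small illustration of those formulas (plain C, not driver code):

/* Active dwell per the formulas in iwl_mvm_get_active_dwell() above. */
#include <stdio.h>

static int active_dwell_ms(int is_2ghz, int n_ssids, int basic_dwell_api)
{
    if (basic_dwell_api)
        return 10;                      /* firmware computes the real value */
    return is_2ghz ? 20 + 3 * (n_ssids + 1)
                   : 10 + 2 * (n_ssids + 1);
}

int main(void)
{
    for (int n = 0; n <= 4; n += 2)
        printf("2.4 GHz, %d SSIDs: %2d ms (driver) vs %d ms (BASIC_DWELL)\n",
               n, active_dwell_ms(1, n, 0), active_dwell_ms(1, n, 1));
    return 0;
}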


@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL; tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
} }
/* tid_tspec will default to 0 = BE when QOS isn't enabled */ /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
else
ac = tid_to_mac80211_ac[0];
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS; TX_CMD_FLG_BT_PRIO_POS;


@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(mvm->fw->valid_rx_ant) == 1) if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return false; return false;
if (!mvm->cfg->rx_with_siso_diversity) if (mvm->cfg->rx_with_siso_diversity)
return false; return false;
ieee80211_iterate_active_interfaces_atomic( ieee80211_iterate_active_interfaces_atomic(


@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg) else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg; cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d && if (cfg_7265d &&
(iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d; cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
#endif #endif
pci_set_drvdata(pdev, iwl_trans); pci_set_drvdata(pdev, iwl_trans);


@ -666,7 +666,8 @@ tx_status_ok:
} }
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 *entry, int rxring_idx, int desc_idx) struct sk_buff *new_skb, u8 *entry,
int rxring_idx, int desc_idx)
{ {
struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1; u8 tmp_one = 1;
struct sk_buff *skb; struct sk_buff *skb;
if (likely(new_skb)) {
skb = new_skb;
goto remap;
}
skb = dev_alloc_skb(rtlpci->rxbuffersize); skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb) if (!skb)
return 0; return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */ /* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) = *((dma_addr_t *)skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
bufferaddress = *((dma_addr_t *)skb->cb); bufferaddress = *((dma_addr_t *)skb->cb);
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0; return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) { if (rtlpriv->use_new_trx_flow) {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE, HW_DESC_RX_PREPARE,
@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */ /*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx]; rtlpci->rx_ring[rxring_idx].idx];
struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) { if (rtlpriv->use_new_trx_flow) {
rx_remained_cnt = rx_remained_cnt =
@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
/* get a new skb - if fail, old one will be reused */
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (unlikely(!new_skb)) {
pr_err("Allocation of new skb failed in %s\n",
__func__);
goto no_new;
}
if (rtlpriv->use_new_trx_flow) { if (rtlpriv->use_new_trx_flow) {
buffer_desc = buffer_desc =
&rtlpci->rx_ring[rxring_idx].buffer_desc &rtlpci->rx_ring[rxring_idx].buffer_desc
@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
schedule_work(&rtlpriv->works.lps_change_work); schedule_work(&rtlpriv->works.lps_change_work);
} }
end: end:
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) { if (rtlpriv->use_new_trx_flow) {
_rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
} else {
_rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
rxring_idx, rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
} else {
_rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
rtlpci->rx_ring[rxring_idx].idx); rtlpci->rx_ring[rxring_idx].idx);
if (rtlpci->rx_ring[rxring_idx].idx == if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1) rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
rtlpci->rx_ring[rxring_idx].idx = 0; rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) { for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i)) rxring_idx, i))
return -ENOMEM; return -ENOMEM;
} }
@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
for (i = 0; i < rtlpci->rxringcount; i++) { for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i]; entry = &rtlpci->rx_ring[rxring_idx].desc[i];
if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i)) rxring_idx, i))
return -ENOMEM; return -ENOMEM;
} }


@ -88,10 +88,8 @@ struct netfront_cb {
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats { struct netfront_stats {
u64 rx_packets; u64 packets;
u64 tx_packets; u64 bytes;
u64 rx_bytes;
u64 tx_bytes;
struct u64_stats_sync syncp; struct u64_stats_sync syncp;
}; };
@ -160,7 +158,8 @@ struct netfront_info {
struct netfront_queue *queues; struct netfront_queue *queues;
/* Statistics */ /* Statistics */
struct netfront_stats __percpu *stats; struct netfront_stats __percpu *rx_stats;
struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup; atomic_t rx_gso_checksum_fixup;
}; };
@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{ {
unsigned short id; unsigned short id;
struct netfront_info *np = netdev_priv(dev); struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *stats = this_cpu_ptr(np->stats); struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx; struct xen_netif_tx_request *tx;
char *data = skb->data; char *data = skb->data;
RING_IDX i; RING_IDX i;
@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (notify) if (notify)
notify_remote_via_irq(queue->tx_irq); notify_remote_via_irq(queue->tx_irq);
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&tx_stats->syncp);
stats->tx_bytes += skb->len; tx_stats->bytes += skb->len;
stats->tx_packets++; tx_stats->packets++;
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue); xennet_tx_buf_gc(queue);
@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
static int handle_incoming_queue(struct netfront_queue *queue, static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq) struct sk_buff_head *rxq)
{ {
struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0; int packets_dropped = 0;
struct sk_buff *skb; struct sk_buff *skb;
@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
continue; continue;
} }
u64_stats_update_begin(&stats->syncp); u64_stats_update_begin(&rx_stats->syncp);
stats->rx_packets++; rx_stats->packets++;
stats->rx_bytes += skb->len; rx_stats->bytes += skb->len;
u64_stats_update_end(&stats->syncp); u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */ /* Pass it up. */
napi_gro_receive(&queue->napi, skb); napi_gro_receive(&queue->napi, skb);
@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
int cpu; int cpu;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes; u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start; unsigned int start;
do { do {
start = u64_stats_fetch_begin_irq(&stats->syncp); start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
tx_packets = tx_stats->packets;
tx_bytes = tx_stats->bytes;
} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
rx_packets = stats->rx_packets; do {
tx_packets = stats->tx_packets; start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
rx_bytes = stats->rx_bytes; rx_packets = rx_stats->packets;
tx_bytes = stats->tx_bytes; rx_bytes = rx_stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start)); } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
tot->rx_packets += rx_packets; tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets; tot->tx_packets += tx_packets;
@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
#endif #endif
}; };
static void xennet_free_netdev(struct net_device *netdev)
{
struct netfront_info *np = netdev_priv(netdev);
free_percpu(np->rx_stats);
free_percpu(np->tx_stats);
free_netdev(netdev);
}
static struct net_device *xennet_create_dev(struct xenbus_device *dev) static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{ {
int err; int err;
@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL; np->queues = NULL;
err = -ENOMEM; err = -ENOMEM;
np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL) if (np->rx_stats == NULL)
goto exit;
np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->tx_stats == NULL)
goto exit; goto exit;
netdev->netdev_ops = &xennet_netdev_ops; netdev->netdev_ops = &xennet_netdev_ops;
@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev; return netdev;
exit: exit:
free_netdev(netdev); xennet_free_netdev(netdev);
return ERR_PTR(err); return ERR_PTR(err);
} }
@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
return 0; return 0;
fail: fail:
free_netdev(netdev); xennet_free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL); dev_set_drvdata(&dev->dev, NULL);
return err; return err;
} }
@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
info->queues = NULL; info->queues = NULL;
} }
free_percpu(info->stats); xennet_free_netdev(info->netdev);
free_netdev(info->netdev);
return 0; return 0;
} }


@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* 3. Update dev->stats asynchronously and atomically, and define * 3. Update dev->stats asynchronously and atomically, and define
* neither operation. * neither operation.
* *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a * If device support VLAN filtering this function is called when a
* VLAN id is registered. * VLAN id is registered.
* *
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a * If device support VLAN filtering this function is called when a
* VLAN id is unregistered. * VLAN id is unregistered.
* *
@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \ #define for_each_netdev_in_bond_rcu(bond, slave) \
for_each_netdev_rcu(&init_net, slave) \ for_each_netdev_rcu(&init_net, slave) \
if (netdev_master_upper_dev_get_rcu(slave) == bond) if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev) static inline struct net_device *next_net_device(struct net_device *dev)


@ -174,6 +174,10 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
attributes. */ attributes. */
OVS_PACKET_ATTR_UNUSED1,
OVS_PACKET_ATTR_UNUSED2,
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
__OVS_PACKET_ATTR_MAX __OVS_PACKET_ATTR_MAX
}; };
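The two UNUSED placeholders above exist because attribute numbers in this enum are user-visible ABI: values already handed out (or burned) elsewhere cannot be reclaimed, so OVS_PACKET_ATTR_PROBE is appended at the next free value instead of renumbering what follows. A hedged sketch of the general pattern, with made-up names:

    enum example_attr {
            EXAMPLE_ATTR_UNSPEC,        /* 0 */
            EXAMPLE_ATTR_PACKET,        /* 1 */
            EXAMPLE_ATTR_UNUSED,        /* 2 - slot reserved, never reused */
            EXAMPLE_ATTR_PROBE,         /* 3 - new attribute, stable value */
            __EXAMPLE_ATTR_MAX
    };
    #define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)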

View File

@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL; dst = NULL;
if (is_broadcast_ether_addr(dest)) { if (is_broadcast_ether_addr(dest)) {
if (p->flags & BR_PROXYARP && if (IS_ENABLED(CONFIG_INET) &&
p->flags & BR_PROXYARP &&
skb->protocol == htons(ETH_P_ARP)) skb->protocol == htons(ETH_P_ARP))
br_do_proxy_arp(skb, br, vid); br_do_proxy_arp(skb, br, vid);
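Prefixing the condition with IS_ENABLED(CONFIG_INET) works because the macro expands to a compile-time 0 or 1: with INET disabled the whole && chain is constant-false, the compiler drops the branch, and the reference to the ARP helper disappears, which is what keeps the bridge linking. A minimal sketch of the idiom, with an invented helper name:

    static void example_do_proxy_arp(struct sk_buff *skb);  /* INET-only helper */

    static void example_handle_frame(struct sk_buff *skb, unsigned long flags)
    {
            if (IS_ENABLED(CONFIG_INET) &&
                (flags & BR_PROXYARP) &&
                skb->protocol == htons(ETH_P_ARP))
                    example_do_proxy_arp(skb);  /* dead code when !CONFIG_INET */
    }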

View File

@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
case NDTPA_BASE_REACHABLE_TIME: case NDTPA_BASE_REACHABLE_TIME:
NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
nla_get_msecs(tbp[i])); nla_get_msecs(tbp[i]));
/* update reachable_time as well, otherwise, the change will
* only be effective after the next time neigh_periodic_work
* decides to recompute it (can be multiple minutes)
*/
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
break; break;
case NDTPA_GC_STALETIME: case NDTPA_GC_STALETIME:
NEIGH_VAR_SET(p, GC_STALETIME, NEIGH_VAR_SET(p, GC_STALETIME,
@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret; return ret;
} }
static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
struct neigh_parms *p = ctl->extra2;
int ret;
if (strcmp(ctl->procname, "base_reachable_time") == 0)
ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
else
ret = -1;
if (write && ret == 0) {
/* update reachable_time as well, otherwise, the change will
* only be effective after the next time neigh_periodic_work
* decides to recompute it
*/
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
return ret;
}
#define NEIGH_PARMS_DATA_OFFSET(index) \ #define NEIGH_PARMS_DATA_OFFSET(index) \
(&((struct neigh_parms *) 0)->data[index]) (&((struct neigh_parms *) 0)->data[index])
@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
/* ReachableTime (in milliseconds) */ /* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
} else {
/* Those handlers will update p->reachable_time after
* base_reachable_time(_ms) is set to ensure the new timer starts being
* applied after the next neighbour update instead of waiting for
* neigh_periodic_work to update its value (can be multiple minutes)
* So any handler that replaces them should do this as well
*/
/* ReachableTime */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
neigh_proc_base_reachable_time;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
neigh_proc_base_reachable_time;
} }
/* Don't export sysctls to unprivileged users */ /* Don't export sysctls to unprivileged users */
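Both hunks above refresh p->reachable_time immediately because the value the neighbour state machine consults is not the sysctl itself but a randomized derivative of it. Roughly, from memory of the helper the patch calls (treat the body below as an approximation, not the exact source):

    static unsigned long example_rand_reach_time(unsigned long base)
    {
            /* roughly uniform over [base/2, 3*base/2); 0 disables */
            return base ? prandom_u32() % base + (base >> 1) : 0;
    }

Without the explicit recomputation, a newly written base_reachable_time(_ms) would only take effect the next time neigh_periodic_work() happens to re-roll the value, which can be many minutes later.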

View File

@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr)); memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) { if (priv->sreg_proto_min) {
mr.range[0].min.all = (__force __be16) mr.range[0].min.all =
data[priv->sreg_proto_min].data[0]; *(__be16 *)&data[priv->sreg_proto_min].data[0];
mr.range[0].max.all = (__force __be16) mr.range[0].max.all =
data[priv->sreg_proto_max].data[0]; *(__be16 *)&data[priv->sreg_proto_max].data[0];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
} }
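The cast change above is about endianness: the nft data register is a 32-bit slot whose leading bytes carry the 16-bit value as it was loaded, so converting the register's *value* to __be16 keeps the low half of the word (the wrong bytes on big-endian hosts), while reinterpreting its *storage* reads the leading bytes on any host. A standalone illustration with userspace types; memcpy is used here as the strictly portable spelling of the pointer cast:

    #include <stdint.h>
    #include <string.h>

    uint16_t extract_by_value(uint32_t reg)
    {
            return (uint16_t)reg;                   /* low 16 bits: host-endian dependent */
    }

    uint16_t extract_by_storage(const uint32_t *reg)
    {
            uint16_t be16;

            memcpy(&be16, reg, sizeof(be16));       /* first two bytes, as written */
            return be16;
    }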

View File

@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range)); memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) { if (priv->sreg_proto_min) {
range.min_proto.all = (__force __be16) range.min_proto.all =
data[priv->sreg_proto_min].data[0]; *(__be16 *)&data[priv->sreg_proto_min].data[0];
range.max_proto.all = (__force __be16) range.max_proto.all =
data[priv->sreg_proto_max].data[0]; *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
} }

View File

@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct nf_conn *ct; struct nf_conn *ct;
struct net *net; struct net *net;
*diff = 0;
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet, /* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets * so turn this into a no-op for IPv6 packets
@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1; return 1;
#endif #endif
*diff = 0;
/* Only useful for established sessions */ /* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED) if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1; return 1;
@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct ip_vs_conn *n_cp; struct ip_vs_conn *n_cp;
struct net *net; struct net *net;
/* no diff required for incoming packets */
*diff = 0;
#ifdef CONFIG_IP_VS_IPV6 #ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet, /* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets * so turn this into a no-op for IPv6 packets
@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1; return 1;
#endif #endif
/* no diff required for incoming packets */
*diff = 0;
/* Only useful for established sessions */ /* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED) if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1; return 1;
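Both moves above are the same fix: *diff is an out-parameter the caller reads unconditionally, so it has to be assigned before the IPv6 early return rather than after it. A minimal sketch of the hazard, with invented names:

    static int example_app_out(int *diff, bool is_ipv6)
    {
            *diff = 0;              /* set before any early return */

            if (is_ipv6)
                    return 1;       /* caller still sees a defined *diff */

            /* ... compute the real payload size delta into *diff ... */
            return 1;
    }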

View File

@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/ */
NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Confirming conntrack %p\n", ct); pr_debug("Confirming conntrack %p\n", ct);
/* We have to check the DYING flag inside the lock to prevent /* We have to check the DYING flag after unlink to prevent
a race against nf_ct_get_next_corpse() possibly called from * a race against nf_ct_get_next_corpse() possibly called from
user context, else we insert an already 'dead' hash, blocking * user context, else we insert an already 'dead' hash, blocking
further use of that particular connection -JM */ * further use of that particular connection -JM.
*/
nf_ct_del_from_dying_or_unconfirmed_list(ct);
if (unlikely(nf_ct_is_dying(ct))) { if (unlikely(nf_ct_is_dying(ct)))
nf_conntrack_double_unlock(hash, reply_hash); goto out;
local_bh_enable();
return NF_ACCEPT;
}
/* See if there's one in the list already, including reverse: /* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're NAT could have grabbed it without realizing, since we're
@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out; goto out;
nf_ct_del_from_dying_or_unconfirmed_list(ct);
/* Timer relative to confirmation time, not original /* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in setting time, otherwise we'd get timer wrap in
weird delay cases. */ weird delay cases. */
@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT; return NF_ACCEPT;
out: out:
nf_ct_add_to_dying_list(ct);
nf_conntrack_double_unlock(hash, reply_hash); nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert_failed); NF_CT_STAT_INC(net, insert_failed);
local_bh_enable(); local_bh_enable();

View File

@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
struct nft_chain *chain, *nc; struct nft_chain *chain, *nc;
struct nft_set *set, *ns; struct nft_set *set, *ns;
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { list_for_each_entry(chain, &ctx->table->chains, list) {
ctx->chain = chain; ctx->chain = chain;
err = nft_delrule_by_chain(ctx); err = nft_delrule_by_chain(ctx);
if (err < 0) if (err < 0)
goto out; goto out;
err = nft_delchain(ctx);
if (err < 0)
goto out;
} }
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
goto out; goto out;
} }
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
ctx->chain = chain;
err = nft_delchain(ctx);
if (err < 0)
goto out;
}
err = nft_deltable(ctx); err = nft_deltable(ctx);
out: out:
return err; return err;
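The reshuffle above turns the flush into two passes: the first empties every chain of rules, which also drops any cross-chain jump references, and only the second walks the list again, with the _safe iterator, to delete the now-unreferenced chains. A hedged sketch of the shape, with illustrative types and helpers:

    static int example_flush_table(struct example_table *table)
    {
            struct example_chain *chain, *next;
            int err;

            list_for_each_entry(chain, &table->chains, list) {
                    err = example_flush_rules(chain);       /* pass 1: rules only */
                    if (err < 0)
                            return err;
            }

            list_for_each_entry_safe(chain, next, &table->chains, list) {
                    err = example_delete_chain(chain);      /* pass 2: chains */
                    if (err < 0)
                            return err;
            }

            return 0;
    }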

View File

@ -321,7 +321,8 @@ replay:
nlh = nlmsg_hdr(skb); nlh = nlmsg_hdr(skb);
err = 0; err = 0;
if (nlh->nlmsg_len < NLMSG_HDRLEN) { if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
skb->len < nlh->nlmsg_len) {
err = -EINVAL; err = -EINVAL;
goto ack; goto ack;
} }
@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group)
int type; int type;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
return -EINVAL; return 0;
type = nfnl_group2type[group]; type = nfnl_group2type[group];
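The new length test checks two things the old one did not: nlmsg_len() is the payload length, so it must cover at least the struct nfgenmsg that every nfnetlink message starts with, and the length claimed in the header must not exceed what was actually received in the skb. A userspace-flavoured sketch of those bounds (helper name invented):

    #include <linux/netlink.h>

    static int example_msg_ok(const struct nlmsghdr *nlh, unsigned int buf_len,
                              unsigned int min_payload)
    {
            if (buf_len < NLMSG_HDRLEN || nlh->nlmsg_len < NLMSG_HDRLEN)
                    return 0;       /* malformed header */
            if (buf_len < nlh->nlmsg_len)
                    return 0;       /* claims more than was received */
            if (nlh->nlmsg_len - NLMSG_HDRLEN < min_payload)
                    return 0;       /* payload too short */
            return 1;
    }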

View File

@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
} }
if (priv->sreg_proto_min) { if (priv->sreg_proto_min) {
range.min_proto.all = (__force __be16) range.min_proto.all =
data[priv->sreg_proto_min].data[0]; *(__be16 *)&data[priv->sreg_proto_min].data[0];
range.max_proto.all = (__force __be16) range.max_proto.all =
data[priv->sreg_proto_max].data[0]; *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
} }

View File

@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct vport *input_vport; struct vport *input_vport;
int len; int len;
int err; int err;
bool log = !a[OVS_FLOW_ATTR_PROBE]; bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL; err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
}; };
static const struct genl_ops dp_packet_genl_ops[] = { static const struct genl_ops dp_packet_genl_ops[] = {
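The policy entry and the corrected lookup belong together: a NLA_FLAG attribute carries no payload, so its mere presence in the parsed attribute table is the signal, and the execute path now reads the packet-namespace constant rather than the flow one. A small sketch of the pattern, reusing the invented EXAMPLE_ATTR_* names from the enum sketch earlier:

    static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
            [EXAMPLE_ATTR_PROBE] = { .type = NLA_FLAG },
    };

    static bool example_should_log(struct nlattr **a)
    {
            return !a[EXAMPLE_ATTR_PROBE];  /* stay quiet for feature probes */
    }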

View File

@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -EINVAL; err = -EINVAL;
if (sock->type == SOCK_DGRAM) { if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
if (unlikely(offset) < 0) if (unlikely(offset < 0))
goto out_free; goto out_free;
} else { } else {
if (ll_header_truncated(dev, len)) if (ll_header_truncated(dev, len))
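The misplaced parenthesis above is worth spelling out: unlikely(x) is __builtin_expect(!!(x), 0), whose value is always 0 or 1, so "unlikely(offset) < 0" can never be true and a failed dev_hard_header() was silently ignored. A standalone, compilable demonstration:

    #include <stdio.h>

    #define unlikely(x)     __builtin_expect(!!(x), 0)

    int main(void)
    {
            int offset = -2;        /* pretend the header call failed */

            printf("broken test: %d\n", unlikely(offset) < 0);      /* prints 0: error missed */
            printf("fixed test : %d\n", unlikely(offset < 0));      /* prints 1: error caught */
            return 0;
    }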

View File

@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
struct sk_buff *skb; struct sk_buff *skb;
skb_queue_walk(&bcl->outqueue, skb) { skb_queue_walk(&bcl->outqueue, skb) {
if (more(buf_seqno(skb), after)) if (more(buf_seqno(skb), after)) {
tipc_link_retransmit(bcl, skb, mod(to - after));
break; break;
}
} }
tipc_link_retransmit(bcl, skb, mod(to - after));
} }
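Moving the retransmit call inside the walk matters because skb_queue_walk()'s cursor only points at a real buffer while the loop body runs; if the walk finishes without a hit, the cursor has wrapped back to the queue head, so acting on it after the loop (as the old code did) hands the link layer a bogus skb. A hedged sketch of the corrected shape, with an invented helper standing in for tipc_link_retransmit():

    static void example_retransmit_after(struct sk_buff_head *queue, u32 after, u32 to)
    {
            struct sk_buff *skb;

            skb_queue_walk(queue, skb) {
                    if (more(buf_seqno(skb), after)) {
                            example_retransmit(skb, to - after);    /* act on the match... */
                            break;                                  /* ...while the cursor is valid */
                    }
            }
    }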
/** /**