Merge gitolite.kernel.org:/pub/scm/linux/kernel/git/davem/net
David writes:
 "Networking fixes:

  1) Prefix length validation in xfrm layer, from Steffen Klassert.
  2) TX status reporting fix in mac80211, from Andrei Otcheretianski.
  3) Fix hangs due to TX_DROP in mac80211, from Bob Copeland.
  4) Fix DMA error regression in b43, from Larry Finger.
  5) Add input validation to xenvif_set_hash_mapping(), from Jan Beulich.
  6) SMMU unmapping fix in hns driver, from Yunsheng Lin.
  7) Bluetooth crash in unpairing on SMP, from Matias Karhumaa.
  8) WoL handling fixes in the phy layer, from Heiner Kallweit.
  9) Fix deadlock in bonding, from Mahesh Bandewar.
  10) Fill ttl inherit info in vxlan driver, from Hangbin Liu.
  11) Fix TX timeouts during netpoll, from Michael Chan.
  12) RXRPC layer fixes from David Howells.
  13) Another batch of ndo_poll_controller() removals to deal with excessive resource consumption during load. From Eric Dumazet.
  14) Fix a specific TIPC failure scenario, from LUU Duc Canh.
  15) Really disable clocks in r8169 during suspend so that low power states can actually be reached.
  16) Fix SYN backlog lockdep issue in tcp and dccp, from Eric Dumazet.
  17) Fix RCU locking in netpoll SKB send, which shows up in bonding, from Dave Jones.
  18) Fix TX stalls in r8169, from Heiner Kallweit.
  19) Fix lockups in nfp due to control message storms, from Jakub Kicinski.
  20) Various rmnet bug fixes from Subash Abhinov Kasiviswanathan and Sean Tranchetti.
  21) Fix use after free in ip_cmsg_recv_dstaddr(), from Eric Dumazet."

* gitolite.kernel.org:/pub/scm/linux/kernel/git/davem/net: (122 commits)
  ixgbe: check return value of napi_complete_done()
  sctp: fix fall-through annotation
  r8169: always autoneg on resume
  ipv4: fix use-after-free in ip_cmsg_recv_dstaddr()
  net: qualcomm: rmnet: Fix incorrect allocation flag in receive path
  net: qualcomm: rmnet: Fix incorrect allocation flag in transmit
  net: qualcomm: rmnet: Skip processing loopback packets
  net: systemport: Fix wake-up interrupt race during resume
  rtnl: limit IFLA_NUM_TX_QUEUES and IFLA_NUM_RX_QUEUES to 4096
  bonding: fix warning message
  inet: make sure to grab rcu_read_lock before using ireq->ireq_opt
  nfp: avoid soft lockups under control message storm
  declance: Fix continuation with the adapter identification message
  net: fec: fix rare tx timeout
  r8169: fix network stalls due to missing bit TXCFG_AUTO_FIFO
  tun: napi flags belong to tfile
  tun: initialize napi_mutex unconditionally
  tun: remove unused parameters
  bond: take rcu lock in netpoll_send_skb_on_dev
  rtnetlink: Fail dump if target netnsid is invalid
  ...
commit cec4de302c
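Item 13 above is the dominant theme of the diff that follows: many drivers simply delete their .ndo_poll_controller hooks, because those hooks scheduled NAPI on every queue each time netpoll invoked them, and under load that loop could consume a CPU indefinitely. A minimal before/after sketch of the pattern, using hypothetical foo_* names (illustrative only, not taken from any of the drivers below):

	#include <linux/netdevice.h>

	/* Hypothetical driver state: one NAPI context per queue. */
	struct foo_adapter {
		struct napi_struct *napi;
		int num_queues;
	};

	#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Before: a driver-private netpoll hook. Each call schedules every
	 * NAPI instance, so a netconsole message storm keeps re-triggering
	 * all queues and can monopolize the CPU.
	 */
	static void foo_netpoll(struct net_device *dev)
	{
		struct foo_adapter *adapter = netdev_priv(dev);
		int i;

		for (i = 0; i < adapter->num_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}
	#endif

	/* After: the series deletes foo_netpoll() and its
	 * .ndo_poll_controller = foo_netpoll entry outright; with no hook
	 * present, the netpoll core polls the device's NAPI instances
	 * itself with a bounded budget.
	 */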
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
 	1 - Disabled by default, enabled when an ICMP black hole detected
 	2 - Always enabled, use initial MSS of tcp_base_mss.

-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
 	Controls how often to start TCP Packetization-Layer Path MTU
 	Discovery reprobe. The default is reprobing every 10 minutes as
 	per RFC4821.

@@ -2956,7 +2956,6 @@ F: include/linux/bcm963xx_tag.h

 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
@@ -2977,6 +2976,7 @@ F: drivers/scsi/bnx2i/

 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M: Ariel Elior <ariel.elior@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M: everest-linux-l2@cavium.com
 L: netdev@vger.kernel.org
 S: Supported
@@ -5470,7 +5470,8 @@ S: Odd Fixes
 F: drivers/net/ethernet/agere/

 ETHERNET BRIDGE
-M: Stephen Hemminger <stephen@networkplumber.org>
+M: Roopa Prabhu <roopa@cumulusnetworks.com>
+M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -11979,7 +11980,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
 F: drivers/scsi/qla4xxx/

 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M: Manish Chopra <manish.chopra@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org
@@ -11987,7 +11988,6 @@ S: Supported
 F: drivers/net/ethernet/qlogic/qlcnic/

 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
 M: Manish Chopra <manish.chopra@cavium.com>
 M: Dept-GELinuxNICDev@cavium.com
 L: netdev@vger.kernel.org

@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);

 /*---------------------------- General routines -----------------------------*/

@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
		}
	}

-	/* don't change skb->dev for link-local packets */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+	/* Link-local multicast packets should be passed to the
+	 * stack on the link they arrive as well as pass them to the
+	 * bond-master device. These packets are mostly usable when
+	 * stack receives it with the link on which they arrive
+	 * (e.g. LLDP) they also must be available on master. Some of
+	 * the use cases include (but are not limited to): LLDP agents
+	 * that must be able to operate both on enslaved interfaces as
+	 * well as on bonds themselves; linux bridges that must be able
+	 * to process/pass BPDUs from attached bonds when any kind of
+	 * STP version is enabled on the network.
+	 */
+	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (nskb) {
+			nskb->dev = bond->dev;
+			nskb->queue_mapping = 0;
+			netif_rx(nskb);
+		}
		return RX_HANDLER_PASS;
+	}
	if (bond_should_deliver_exact_match(skb, slave, bond))
		return RX_HANDLER_EXACT;

@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
			return NULL;
		}
	}
+	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	return slave;
 }

@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
	struct bonding *bond = bond_get_bond_by_slave(slave);

+	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
	info->link_failure_count = slave->link_failure_count;
 }

-static void bond_netdev_notify(struct net_device *dev,
-			       struct netdev_bonding_info *info)
-{
-	rtnl_lock();
-	netdev_bonding_info_change(dev, info);
-	rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-	struct netdev_notify_work *w =
-		container_of(_work, struct netdev_notify_work, work.work);
+	struct slave *slave = container_of(_work, struct slave,
+					   notify_work.work);

-	bond_netdev_notify(w->dev, &w->bonding_info);
-	dev_put(w->dev);
-	kfree(w);
+	if (rtnl_trylock()) {
+		struct netdev_bonding_info binfo;
+
+		bond_fill_ifslave(slave, &binfo.slave);
+		bond_fill_ifbond(slave->bond, &binfo.master);
+		netdev_bonding_info_change(slave->dev, &binfo);
+		rtnl_unlock();
+	} else {
+		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+	}
 }

 void bond_queue_slave_event(struct slave *slave)
 {
-	struct bonding *bond = slave->bond;
-	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-	if (!nnw)
-		return;
-
-	dev_hold(slave->dev);
-	nnw->dev = slave->dev;
-	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-	bond_fill_ifbond(bond, &nnw->bonding_info.master);
-	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }

 void bond_lower_state_changed(struct slave *slave)

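The bond_main.c hunks above are the bonding deadlock fix from item 9: the notify work now lives in struct slave, and the worker takes RTNL with rtnl_trylock() and requeues itself instead of sleeping. A stripped-down sketch of that trylock-or-requeue idiom, with hypothetical foo_* names:

	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	struct foo_slave {
		struct workqueue_struct *wq;
		struct delayed_work notify_work;
	};

	/* Never block on rtnl_lock() inside the work item: whoever holds
	 * RTNL may be waiting to cancel/flush this very work, which is the
	 * deadlock. If the lock is busy, retry one jiffy later.
	 */
	static void foo_notify_work(struct work_struct *work)
	{
		struct foo_slave *slave = container_of(work, struct foo_slave,
						       notify_work.work);

		if (rtnl_trylock()) {
			/* ... emit the notification under RTNL ... */
			rtnl_unlock();
		} else {
			queue_delayed_work(slave->wq, &slave->notify_work, 1);
		}
	}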
@@ -2185,25 +2185,6 @@ error_drop_packet:
	return NETDEV_TX_OK;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-	struct ena_adapter *adapter = netdev_priv(netdev);
-	int i;
-
-	/* Dont schedule NAPI if the driver is in the middle of reset
-	 * or netdev is down.
-	 */
-
-	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-		return;
-
-	for (i = 0; i < adapter->num_queues; i++)
-		napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
	.ndo_change_mtu = ena_change_mtu,
	.ndo_set_mac_address = NULL,
	.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };

 static int ena_device_validate_params(struct ena_adapter *adapter,

@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
	int i, ret;
	unsigned long esar_base;
	unsigned char *esar;
+	const char *desc;

	if (dec_lance_debug && version_printed++ == 0)
		printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
	 */
	switch (type) {
	case ASIC_LANCE:
-		printk("%s: IOASIC onboard LANCE", name);
+		desc = "IOASIC onboard LANCE";
		break;
	case PMAD_LANCE:
-		printk("%s: PMAD-AA", name);
+		desc = "PMAD-AA";
		break;
	case PMAX_LANCE:
-		printk("%s: PMAX onboard LANCE", name);
+		desc = "PMAX onboard LANCE";
		break;
	}
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = esar[i * 4];

-	printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+	printk("%s: %s, addr = %pM, irq = %d\n",
+	       name, desc, dev->dev_addr, dev->irq);

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;

@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
	u32 reg;

-	/* Stop monitoring MPD interrupt */
-	intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

+	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+	if (reg & INTRL2_0_MPD)
+		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+				  RXCHK_BRCM_TAG_MATCH_MASK;
+		netdev_info(priv->netdev,
+			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+	}
+
	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }

@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;
-	u32 reg;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

-	if (priv->irq0_stat & INTRL2_0_MPD)
-		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-	if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-				  RXCHK_BRCM_TAG_MATCH_MASK;
-		netdev_info(priv->netdev,
-			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-	}
-
	if (!priv->is_lite)
		goto out;

@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

-	/* Enable the interrupt wake-up source */
-	intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;

@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
-			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
				rx_pkts = budget;
+				raw_cons = NEXT_RAW_CMP(raw_cons);
+				break;
+			}
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

-		if (rx_pkts == budget)
+		if (rx_pkts && rx_pkts == budget)
			break;
	}

@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

-		if (work_done >= budget)
+		if (work_done >= budget) {
+			if (!budget)
+				BNXT_CP_DB_REARM(cpr->cp_doorbell,
+						 cpr->cp_raw_cons);
			break;
+		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))

@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

+		dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);

@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
@@ -1273,7 +1273,7 @@ skb_done:

	/* Since we have freed up a buffer, the ring is no longer full
	 */
-	if (netif_queue_stopped(ndev)) {
+	if (netif_tx_queue_stopped(nq)) {
		entries_free = fec_enet_get_free_txdesc_num(txq);
		if (entries_free >= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}

@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
 }

@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+			    int send_sz, dma_addr_t dma, int frag_end,
+			    int buf_num, enum hns_desc_type type, int mtu)
 {
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
+	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
	ring_ptr_move_fw(ring, next_to_use);
 }

+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 int buf_num, enum hns_desc_type type, int mtu)
+{
+	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+			buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
-		fill_v2_desc(ring, priv,
-			     (k == frag_buf_num - 1) ?
+		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
-			     dma + BD_MAX_SEND_SIZE * k,
-			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-			     buf_num,
-			     (type == DESC_TYPE_SKB && !k) ?
+				dma + BD_MAX_SEND_SIZE * k,
+				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+				buf_num,
+				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
-			     mtu);
+				mtu);
 }

 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
	return phy_mii_ioctl(phy_dev, ifr, cmd);
 }

-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-	struct hns_nic_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-		napi_schedule(&priv->ring_data[i].napi);
-	local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hns_nic_poll_controller,
-#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
 };

@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
	stats->tx_errors = nic_tx_stats->tx_dropped;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-	struct hinic_dev *nic_dev = netdev_priv(netdev);
-	int i, num_qps;
-
-	num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-	for (i = 0; i < num_qps; i++) {
-		struct hinic_txq *txq = &nic_dev->txqs[i];
-		struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-		napi_schedule(&txq->napi);
-		napi_schedule(&rxq->napi);
-	}
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
	.ndo_open = hinic_open,
	.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
	.ndo_start_xmit = hinic_xmit_frame,
	.ndo_tx_timeout = hinic_tx_timeout,
	.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hinic_netpoll,
-#endif
 };

 static void netdev_features_init(struct net_device *netdev)

@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
	return rx;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < port->num_def_qps; i++)
-		napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
	struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open = ehea_open,
	.ndo_stop = ehea_stop,
	.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ehea_netpoll,
-#endif
	.ndo_get_stats64 = ehea_get_stats64,
	.ndo_set_mac_address = ehea_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,

@@ -2207,19 +2207,6 @@ restart_poll:
	return frames_processed;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(dev);
-	int i;
-
-	replenish_pools(netdev_priv(dev));
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-				     adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
	int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_set_mac_address = ibmvnic_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
	.ndo_change_mtu = ibmvnic_change_mtu,
	.ndo_features_check = ibmvnic_features_check,
 };

@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
		return budget;

	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter,
+						BIT_ULL(q_vector->v_idx));
+	}

	return min(work_done, budget - 1);
 }

@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"

+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;

 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)

@@ -16,6 +16,8 @@ struct mlx5e_tc_table {

	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
	DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+	struct notifier_block netdevice_nb;
 };

 struct mlx5e_flow_table {

@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
	}
 }

-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open = mlx5e_open,
	.ndo_stop = mlx5e_close,
	.ndo_start_xmit = mlx5e_xmit,

@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

			*match_level = MLX5_MATCH_L2;
		}
+	} else {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
	return 0;
 }

+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+					      struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+	struct mlx5e_hairpin_entry *hpe;
+	u16 peer_vhca_id;
+	int bkt;
+
+	if (!same_hw_devs(priv, peer_priv))
+		return;
+
+	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+		if (hpe->peer_vhca_id == peer_vhca_id)
+			hpe->hp->pair->peer_gone = true;
+	}
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_flow_steering *fs;
+	struct mlx5e_priv *peer_priv;
+	struct mlx5e_tc_table *tc;
+	struct mlx5e_priv *priv;
+
+	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+	    event != NETDEV_UNREGISTER ||
+	    ndev->reg_state == NETREG_REGISTERED)
+		return NOTIFY_DONE;
+
+	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+	fs = container_of(tc, struct mlx5e_flow_steering, tc);
+	priv = container_of(fs, struct mlx5e_priv, fs);
+	peer_priv = netdev_priv(ndev);
+	if (priv == peer_priv ||
+	    !(priv->netdev->features & NETIF_F_HW_TC))
+		return NOTIFY_DONE;
+
+	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+	return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

-	return rhashtable_init(&tc->ht, &tc_ht_params);
+	err = rhashtable_init(&tc->ht, &tc_ht_params);
+	if (err)
+		return err;
+
+	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+	if (register_netdevice_notifier(&tc->netdevice_nb)) {
+		tc->netdevice_nb.notifier_call = NULL;
+		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+	}
+
+	return err;
 }

 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
	struct mlx5e_tc_table *tc = &priv->fs.tc;

+	if (tc->netdevice_nb.notifier_call)
+		unregister_netdevice_notifier(&tc->netdevice_nb);
+
	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {

@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
	u32 max_guarantee = 0;
	int i;

-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
		evport = &esw->vports[i];
		if (!evport->enabled || evport->info.min_rate < max_guarantee)
			continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
	int err;
	int i;

-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
		evport = &esw->vports[i];
		if (!evport->enabled)
			continue;

@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)

	for (i = 0; i < hp->num_channels; i++) {
		mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+		if (!hp->peer_gone)
+			mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
	}
 }

@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
			       MLX5_RQC_STATE_RST, 0, 0);

	/* unset peer SQs */
+	if (hp->peer_gone)
+		return;
	for (i = 0; i < hp->num_channels; i++)
		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
				       MLX5_SQC_STATE_RST, 0, 0);

@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
	return true;
 }

-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
+	unsigned int budget = 512;

-	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
		continue;
+
+	return budget;
 }

@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
	__nfp_ctrl_tx_queued(r_vec);
	spin_unlock_bh(&r_vec->lock);

-	nfp_ctrl_rx(r_vec);
-
-	nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	if (nfp_ctrl_rx(r_vec)) {
+		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+	} else {
+		tasklet_schedule(&r_vec->tasklet);
+		nn_dp_warn(&r_vec->nfp_net->dp,
+			   "control message budget exceeded!\n");
+	}
 }

 /* Setup and Configuration

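The nfp hunks above fix item 19 by bounding the control-message drain. Returning the leftover budget lets the caller tell "ring empty" (safe to re-arm the interrupt) from "budget exhausted" (reschedule and warn). The same bounded-drain shape, sketched with hypothetical foo_* names:

	#include <linux/types.h>

	/* Assumed helper: consumes one control message, returns false when
	 * the ring is empty (hypothetical stand-in for nfp_ctrl_rx_one()).
	 */
	bool foo_ctrl_rx_one(void *vec);

	static bool foo_ctrl_rx(void *vec)
	{
		unsigned int budget = 512;

		/* Drain at most 512 messages per pass so a message storm
		 * cannot pin this context forever.
		 */
		while (foo_ctrl_rx_one(vec) && budget--)
			continue;

		return budget;	/* non-zero: ring drained within budget */
	}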
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
				 work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif

 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
	.ndo_tx_timeout = netxen_tx_timeout,
	.ndo_fix_features = netxen_fix_features,
	.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };

 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
	return work_done;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-	int ring;
-	struct nx_host_sds_ring *sds_ring;
-	struct netxen_adapter *adapter = netdev_priv(netdev);
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-	disable_irq(adapter->irq);
-	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		netxen_intr(adapter->irq, sds_ring);
-	}
-	enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {

@@ -11987,6 +11987,7 @@ struct public_global {
	u32 running_bundle_id;
	s32 external_temperature;
	u32 mdump_reason;
+	u64 reserved;
	u32 data_ptr;
	u32 data_size;
 };

@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,

		cm_info->local_ip[0] = ntohl(iph->daddr);
		cm_info->remote_ip[0] = ntohl(iph->saddr);
-		cm_info->ip_version = TCP_IPV4;
+		cm_info->ip_version = QED_TCP_IPV4;

		ip_hlen = (iph->ihl) * sizeof(u32);
		*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
			cm_info->remote_ip[i] =
			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
		}
-		cm_info->ip_version = TCP_IPV6;
+		cm_info->ip_version = QED_TCP_IPV6;

		ip_hlen = sizeof(*ip6h);
		*payload_len = ntohs(ip6h->payload_len);

@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
				 num_cons, "Toggle");
	if (rc) {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-			   "Failed to allocate toogle bits, rc = %d\n", rc);
+			   "Failed to allocate toggle bits, rc = %d\n", rc);
		goto free_cq_map;
	}

@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,

 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-	enum roce_flavor flavor;
-
	switch (roce_mode) {
	case ROCE_V1:
-		flavor = PLAIN_ROCE;
-		break;
+		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
-		flavor = RROCE_IPV4;
-		break;
+		return RROCE_IPV4;
	case ROCE_V2_IPV6:
-		flavor = ROCE_V2_IPV6;
-		break;
+		return RROCE_IPV6;
	default:
-		flavor = MAX_ROCE_MODE;
-		break;
+		return MAX_ROCE_FLAVOR;
	}
-	return flavor;
 }

 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)

@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
				  struct qed_tunnel_info *p_src)
 {
-	enum tunnel_clss type;
+	int type;

	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
	}

	if (!p_iov->b_pre_fp_hsi &&
-	    ETH_HSI_VER_MINOR &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct qed_tunn_update_type *p_src,
-			   enum qed_tunn_clss mask, u8 *p_cls)
+			   enum qed_tunn_mode mask, u8 *p_cls)
 {
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			 struct qed_tunn_update_type *p_src,
-			 enum qed_tunn_clss mask,
+			 enum qed_tunn_mode mask,
			 u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
			 u8 *p_update_port, u16 *p_udp_port)
 {

@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
	int (*config_loopback) (struct qlcnic_adapter *, u8);
	int (*clear_loopback) (struct qlcnic_adapter *, u8);
	int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-	void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+	void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+				 u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
	int (*get_board_info) (struct qlcnic_adapter *);
	void (*set_mac_filter_count) (struct qlcnic_adapter *);
	void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }

 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-					u64 *addr, u16 id)
+					u64 *addr, u16 vlan,
+					struct qlcnic_host_tx_ring *tx_ring)
 {
-	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+	adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }

 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)

@@ -2135,7 +2135,8 @@ out:
 }

 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-				  u16 vlan_id)
+				  u16 vlan_id,
+				  struct qlcnic_host_tx_ring *tx_ring)
 {
	u8 mac[ETH_ALEN];
	memcpy(&mac, addr, ETH_ALEN);

@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+				  u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);

struct net_device *netdev);
|
||||
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
|
||||
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
|
||||
u64 *uaddr, u16 vlan_id);
|
||||
u64 *uaddr, u16 vlan_id,
|
||||
struct qlcnic_host_tx_ring *tx_ring);
|
||||
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
|
||||
struct ethtool_coalesce *);
|
||||
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
|
||||
|
@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
|
||||
}
|
||||
|
||||
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
|
||||
u16 vlan_id)
|
||||
u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
|
||||
{
|
||||
struct cmd_desc_type0 *hwdesc;
|
||||
struct qlcnic_nic_req *req;
|
||||
struct qlcnic_mac_req *mac_req;
|
||||
struct qlcnic_vlan_req *vlan_req;
|
||||
struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
|
||||
u32 producer;
|
||||
u64 word;
|
||||
|
||||
@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
|
||||
|
||||
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
|
||||
struct cmd_desc_type0 *first_desc,
|
||||
struct sk_buff *skb)
|
||||
struct sk_buff *skb,
|
||||
struct qlcnic_host_tx_ring *tx_ring)
|
||||
{
|
||||
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
|
||||
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
|
||||
@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
|
||||
tmp_fil->vlan_id == vlan_id) {
|
||||
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
|
||||
qlcnic_change_filter(adapter, &src_addr,
|
||||
vlan_id);
|
||||
vlan_id, tx_ring);
|
||||
tmp_fil->ftime = jiffies;
|
||||
return;
|
||||
}
|
||||
@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
|
||||
if (!fil)
|
||||
return;
|
||||
|
||||
qlcnic_change_filter(adapter, &src_addr, vlan_id);
|
||||
qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
|
||||
fil->ftime = jiffies;
|
||||
fil->vlan_id = vlan_id;
|
||||
memcpy(fil->faddr, &src_addr, ETH_ALEN);
|
||||
@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
}
|
||||
|
||||
if (adapter->drv_mac_learn)
|
||||
qlcnic_send_filter(adapter, first_desc, skb);
|
||||
qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
|
||||
|
||||
tx_ring->tx_stats.tx_bytes += skb->len;
|
||||
tx_ring->tx_stats.xmit_called++;
|
||||
|
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif

 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
	.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
	.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
	.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
	.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
	return IRQ_HANDLED;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_host_sds_ring *sds_ring;
-	struct qlcnic_recv_context *recv_ctx;
-	struct qlcnic_host_tx_ring *tx_ring;
-	int ring;
-
-	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-		return;
-
-	recv_ctx = adapter->recv_ctx;
-
-	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-		sds_ring = &recv_ctx->sds_rings[ring];
-		qlcnic_disable_sds_intr(adapter, sds_ring);
-		napi_schedule(&sds_ring->napi);
-	}
-
-	if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-		/* Only Multi-Tx queue capable devices need to
-		 * schedule NAPI for TX rings
-		 */
-		if ((qlcnic_83xx_check(adapter) &&
-		     (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-		    (qlcnic_82xx_check(adapter) &&
-		     !qlcnic_check_multi_tx(adapter)))
-			return;
-
-		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-			tx_ring = &adapter->tx_ring[ring];
-			qlcnic_disable_tx_intr(adapter, tx_ring);
-			napi_schedule(&tx_ring->napi);
-		}
-	}
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {

@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
-		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
	}

	if (skb_headroom(skb) < required_headroom) {
-		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
	if (!skb)
		goto done;

+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
	dev = skb->dev;
	port = rmnet_get_port(dev);

@@ -4072,13 +4072,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)

	genphy_soft_reset(dev->phydev);

-	/* It was reported that chip version 33 ends up with 10MBit/Half on a
+	/* It was reported that several chips end up with 10MBit/Half on a
	 * 1GBit link after resuming from S3. For whatever reason the PHY on
-	 * this chip doesn't properly start a renegotiation when soft-reset.
+	 * these chips doesn't properly start a renegotiation when soft-reset.
	 * Explicitly requesting a renegotiation fixes this.
	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
-	    dev->phydev->autoneg == AUTONEG_ENABLE)
+	if (dev->phydev->autoneg == AUTONEG_ENABLE)
		phy_restart_aneg(dev->phydev);
 }

@@ -4536,9 +4535,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)

 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-	/* Set DMA burst size and Interframe Gap Time */
-	RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-		(InterFrameGap << TxInterFrameGapShift));
+	u32 val = TX_DMA_BURST << TxDMAShift |
+		  InterFrameGap << TxInterFrameGapShift;
+
+	if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+	    tp->mac_version != RTL_GIGA_MAC_VER_39)
+		val |= TXCFG_AUTO_FIFO;
+
+	RTL_W32(tp, TxConfig, val);
 }

 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -5033,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)

	rtl_disable_clock_request(tp);

-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
@@ -5067,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)

	rtl_disable_clock_request(tp);

-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5112,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)

 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5211,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
	rtl_hw_aspm_clkreq_enable(tp, false);
	rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));

-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5295,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
	rtl8168ep_stop_cmac(tp);

-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5618,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);

-	RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6869,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
 {
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_net_suspend(dev);
+	clk_disable_unprepare(tp->clk);

	return 0;
 }
@@ -6898,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
 {
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
+	struct rtl8169_private *tp = netdev_priv(dev);
+
+	clk_prepare_enable(tp->clk);

	if (netif_running(dev))
		__rtl8169_resume(dev);

@@ -2206,29 +2206,6 @@ static void efx_fini_napi(struct efx_nic *efx)
		efx_fini_napi_channel(channel);
 }

-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_channel *channel;
-
-	efx_for_each_channel(channel, efx)
-		efx_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
	.ndo_get_phys_port_id = efx_get_phys_port_id,
	.ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = efx_netpoll,
-#endif
	.ndo_setup_tc = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = efx_filter_rfs,

@@ -2052,29 +2052,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
		ef4_fini_napi_channel(channel);
 }

-/**************************************************************************
- *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-	struct ef4_nic *efx = netdev_priv(net_dev);
-	struct ef4_channel *channel;
-
-	ef4_for_each_channel(channel, efx)
-		ef4_schedule_channel(channel);
-}
-
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
	.ndo_set_mac_address = ef4_set_mac_address,
	.ndo_set_rx_mode = ef4_set_rx_mode,
	.ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ef4_netpoll,
-#endif
	.ndo_setup_tc = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ef4_filter_rfs,

@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
	struct adf7242_local *lp = spi_get_drvdata(spi);

-	if (!IS_ERR_OR_NULL(lp->debugfs_root))
-		debugfs_remove_recursive(lp->debugfs_root);
+	debugfs_remove_recursive(lp->debugfs_root);

	cancel_delayed_work_sync(&lp->work);
	destroy_workqueue(lp->wqueue);

@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(

	for (i = 0; i < len; i++)
		dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);

-	fifo_buffer = kmalloc(len, GFP_KERNEL);
+	fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
	if (!fifo_buffer)
		return -ENOMEM;
-	memcpy(fifo_buffer, buf, len);
	kfifo_in(&test->up_fifo, &fifo_buffer, 4);
	wake_up_interruptible(&priv->test.readq);

@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
	struct ca8210_test *test = &priv->test;

-	if (!IS_ERR(test->ca8210_dfs_spi_int))
-		debugfs_remove(test->ca8210_dfs_spi_int);
+	debugfs_remove(test->ca8210_dfs_spi_int);
	kfifo_free(&test->up_fifo);
	dev_info(&priv->spi->dev, "Test interface removed\n");
 }

||||
|
||||
switch (seq_state) {
|
||||
/* TX IRQ, RX IRQ and SEQ IRQ */
|
||||
case (0x03):
|
||||
case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
|
||||
if (lp->is_tx) {
|
||||
lp->is_tx = 0;
|
||||
dev_dbg(printdev(lp), "TX is done. No ACK\n");
|
||||
mcr20a_handle_tx_complete(lp);
|
||||
}
|
||||
break;
|
||||
case (0x05):
|
||||
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
|
||||
/* rx is starting */
|
||||
dev_dbg(printdev(lp), "RX is starting\n");
|
||||
mcr20a_handle_rx(lp);
|
||||
break;
|
||||
case (0x07):
|
||||
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
|
||||
if (lp->is_tx) {
|
||||
/* tx is done */
|
||||
lp->is_tx = 0;
|
||||
@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
|
||||
mcr20a_handle_rx(lp);
|
||||
}
|
||||
break;
|
||||
case (0x01):
|
||||
case (DAR_IRQSTS1_SEQIRQ):
|
||||
if (lp->is_tx) {
|
||||
dev_dbg(printdev(lp), "TX is starting\n");
|
||||
mcr20a_handle_tx(lp);
|
||||
|
@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
|
||||
if (!netdev)
|
||||
return !phydev->suspended;
|
||||
|
||||
/* Don't suspend PHY if the attached netdev parent may wakeup.
|
||||
if (netdev->wol_enabled)
|
||||
return false;
|
||||
|
||||
/* As long as not all affected network drivers support the
|
||||
* wol_enabled flag, let's check for hints that WoL is enabled.
|
||||
* Don't suspend PHY if the attached netdev parent may wake up.
|
||||
* The parent may point to a PCI device, as in tg3 driver.
|
||||
*/
|
||||
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
|
||||
@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
|
||||
sysfs_remove_link(&dev->dev.kobj, "phydev");
|
||||
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
|
||||
}
|
||||
phy_suspend(phydev);
|
||||
phydev->attached_dev->phydev = NULL;
|
||||
phydev->attached_dev = NULL;
|
||||
phy_suspend(phydev);
|
||||
phydev->phylink = NULL;
|
||||
|
||||
phy_led_triggers_unregister(phydev);
|
||||
@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
|
||||
int phy_suspend(struct phy_device *phydev)
|
||||
{
|
||||
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
|
||||
struct net_device *netdev = phydev->attached_dev;
|
||||
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
|
||||
int ret = 0;
|
||||
|
||||
/* If the device has WOL enabled, we cannot suspend the PHY */
|
||||
phy_ethtool_get_wol(phydev, &wol);
|
||||
if (wol.wolopts)
|
||||
if (wol.wolopts || (netdev && netdev->wol_enabled))
|
||||
return -EBUSY;
|
||||
|
||||
if (phydev->drv && phydrv->suspend)
|
||||
|
@@ -1098,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)

 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-	hwmon_device_unregister(sfp->hwmon_dev);
-	kfree(sfp->hwmon_name);
+	if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+		hwmon_device_unregister(sfp->hwmon_dev);
+		sfp->hwmon_dev = NULL;
+		kfree(sfp->hwmon_name);
+	}
 }
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)

@@ -181,6 +181,7 @@ struct tun_file {
	};
	struct napi_struct napi;
	bool napi_enabled;
+	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }

 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-			  bool napi_en)
+			  bool napi_en, bool napi_frags)
 {
	tfile->napi_enabled = napi_en;
+	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
-		mutex_init(&tfile->napi_mutex);
	}
 }

-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
 }

-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
 }

-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-	return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+	return tfile->napi_frags_enabled;
 }

 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
-		tun_napi_disable(tun, tfile);
-		tun_napi_del(tun, tfile);
+		tun_napi_disable(tfile);
+		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
-		tun_napi_disable(tun, tfile);
+		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
-		tun_napi_del(tun, tfile);
+		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
 }

 static int tun_attach(struct tun_struct *tun, struct file *file,
-		      bool skip_filter, bool napi)
+		      bool skip_filter, bool napi, bool napi_frags)
 {
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
		tun_enable_queue(tfile);
	} else {
		sock_hold(&tfile->sk);
-		tun_napi_init(tun, tfile, napi);
+		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	tun_set_real_num_queues(tun);
@@ -1709,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
-	bool frags = tun_napi_frags_enabled(tun);
+	bool frags = tun_napi_frags_enabled(tfile);

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;
@@ -2534,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-				 ifr->ifr_flags & IFF_NAPI);
|
||||
ifr->ifr_flags & IFF_NAPI,
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
@ -2632,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
|
||||
(ifr->ifr_flags & TUN_FEATURES);
|
||||
|
||||
INIT_LIST_HEAD(&tun->disabled);
|
||||
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
|
||||
err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
|
||||
ifr->ifr_flags & IFF_NAPI_FRAGS);
|
||||
if (err < 0)
|
||||
goto err_free_flow;
|
||||
|
||||
@ -2781,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
|
||||
ret = security_tun_dev_attach_queue(tun->security);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
|
||||
ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
|
||||
tun->flags & IFF_NAPI_FRAGS);
|
||||
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
|
||||
tun = rtnl_dereference(tfile->tun);
|
||||
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
|
||||
@ -3199,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
mutex_init(&tfile->napi_mutex);
|
||||
RCU_INIT_POINTER(tfile->tun, NULL);
|
||||
tfile->flags = 0;
|
||||
tfile->ifindex = 0;
|
||||
|
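
The tun changes above move the NAPI-frags decision from the device-wide flags word into each tun_file, and initialize napi_mutex unconditionally at open time rather than only when NAPI is enabled. A minimal userspace sketch of the same pattern, with illustrative names (this is not the kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a per-queue file context. */
struct queue_file {
	bool napi_enabled;
	bool napi_frags_enabled;	/* per-queue copy, not a device flag */
	pthread_mutex_t napi_mutex;
};

static struct queue_file *queue_file_open(void)
{
	struct queue_file *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;
	/* Initialize the lock unconditionally at open time: if it were
	 * only set up when NAPI is enabled, a queue attached later with
	 * different flags could lock an uninitialized mutex. */
	pthread_mutex_init(&q->napi_mutex, NULL);
	return q;
}

static void queue_file_attach(struct queue_file *q, bool napi_en,
			      bool napi_frags)
{
	q->napi_enabled = napi_en;
	/* Record frags mode per queue so readers consult stable
	 * per-queue state instead of a device-global flags word. */
	q->napi_frags_enabled = napi_en && napi_frags;
}

int main(void)
{
	struct queue_file *q = queue_file_open();

	if (!q)
		return 1;
	queue_file_attach(q, true, true);
	pthread_mutex_lock(&q->napi_mutex);
	printf("frags enabled: %d\n", q->napi_frags_enabled);
	pthread_mutex_unlock(&q->napi_mutex);
	pthread_mutex_destroy(&q->napi_mutex);
	free(q);
	return 0;
}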
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;

if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;

if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)

@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;

if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;

if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_MODE_RWLC;
if (wolinfo->wolopts & WAKE_MAGIC)

@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
if (ret < 0)
return ret;

pdata->wol = 0;
if (wol->wolopts & WAKE_UCAST)
pdata->wol |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
pdata->wol |= WAKE_MCAST;
if (wol->wolopts & WAKE_BCAST)
pdata->wol |= WAKE_BCAST;
if (wol->wolopts & WAKE_MAGIC)
pdata->wol |= WAKE_MAGIC;
if (wol->wolopts & WAKE_PHY)
pdata->wol |= WAKE_PHY;
if (wol->wolopts & WAKE_ARP)
pdata->wol |= WAKE_ARP;
if (wol->wolopts & ~WAKE_ALL)
return -EINVAL;

pdata->wol = wol->wolopts;

device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (!rtl_can_wakeup(tp))
return -EOPNOTSUPP;

if (wol->wolopts & ~WAKE_ANY)
return -EINVAL;

ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;

@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
int ret;

if (wolinfo->wolopts & ~SUPPORTED_WAKE)
return -EINVAL;

pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;

ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);

@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int ret;

if (wolinfo->wolopts & ~SUPPORTED_WAKE)
return -EINVAL;

pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;

ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);

@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;

if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
return -EINVAL;

if (wolinfo->wolopts & WAKE_PHY)
opt |= SR_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
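
All of these USB driver fixes apply the same idiom: reject a WoL request up front if it asks for any wake source outside the driver's supported mask, instead of silently dropping the unsupported bits. A compilable sketch of the idiom, with made-up flag values (the real WAKE_* constants live in the ethtool headers):

#include <stdint.h>
#include <stdio.h>

/* Made-up wake-source bits; the real WAKE_* values come from ethtool. */
#define WAKE_PHY	(1u << 0)
#define WAKE_MAGIC	(1u << 1)
#define WAKE_ARP	(1u << 2)

/* This hypothetical device can only wake on link or magic packet. */
#define SUPPORTED_WAKE	(WAKE_PHY | WAKE_MAGIC)

/* Return 0 on success, -1 (think -EINVAL) if the request contains any
 * bit the hardware cannot honor, instead of silently masking it off. */
static int set_wol(uint32_t requested, uint32_t *stored)
{
	if (requested & ~SUPPORTED_WAKE)
		return -1;
	*stored = requested;
	return 0;
}

int main(void)
{
	uint32_t stored = 0;

	printf("%d\n", set_wol(WAKE_MAGIC, &stored));			/* 0 */
	printf("%d\n", set_wol(WAKE_MAGIC | WAKE_ARP, &stored));	/* -1 */
	return 0;
}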
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
int i;

for (i = 0; i < vi->curr_queue_pairs; i++)
napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();

@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = virtnet_netpoll,
#endif
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_features_check = passthru_features_check,

@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */

@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
}

if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
!!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,

@@ -257,7 +257,7 @@ static const struct
[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
[I2400M_MS_BUSY] = { "busy", -EBUSY },
[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
[I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
[I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
[I2400M_MS_NO_RF] = { "no RF", -EIO },
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
} else {
/* More than a single header/data pair were missed.
* Report this error, and reset the controller to
* Report this error. If running with open-source
* firmware, then reset the controller to
* revive operation.
*/
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
b43_controller_restart(dev, "Out of order TX");
if (dev->fw.opensource)
b43_controller_restart(dev, "Out of order TX");
return;
}
}

@@ -51,6 +51,7 @@

static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,

@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
struct work_struct destroy_work;
u32 portid;
char alpha2[2];
const struct ieee80211_regdomain *regd;

@@ -2935,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);

if (idx > 0)
hwsim_mcast_new_radio(idx, info, param);
hwsim_mcast_new_radio(idx, info, param);

return idx;

@@ -3565,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};

static void destroy_radio(struct work_struct *work)
{
struct mac80211_hwsim_data *data =
container_of(work, struct mac80211_hwsim_data, destroy_work);

hwsim_radios_generation++;
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
}

static void remove_user_radios(u32 portid)
{
struct mac80211_hwsim_data *entry, *tmp;
LIST_HEAD(list);

spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
list_del(&entry->list);
list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
INIT_WORK(&entry->destroy_work, destroy_radio);
queue_work(hwsim_wq, &entry->destroy_work);
hwsim_radios_generation++;
}
}
spin_unlock_bh(&hwsim_radio_lock);

list_for_each_entry_safe(entry, tmp, &list, list) {
list_del(&entry->list);
mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
NULL);
}
}

static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,

@@ -3646,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
static void __net_exit hwsim_exit_net(struct net *net)
{
struct mac80211_hwsim_data *data, *tmp;
LIST_HEAD(list);

spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {

@@ -3656,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
if (data->netgroup == hwsim_net_get_netgroup(&init_net))
continue;

list_del(&data->list);
list_move(&data->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
}
spin_unlock_bh(&hwsim_radio_lock);

list_for_each_entry_safe(data, tmp, &list, list) {
list_del(&data->list);
mac80211_hwsim_del_radio(data,
wiphy_name(data->hw->wiphy),
NULL);
spin_lock_bh(&hwsim_radio_lock);
}
spin_unlock_bh(&hwsim_radio_lock);

ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
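
The mac80211_hwsim hunks above replace per-entry deferred work with a common teardown pattern: unlink matching entries onto a private list while holding the lock, then destroy them after dropping it, so the heavy work never runs under the spinlock. A rough userspace sketch of that pattern, using a hand-rolled singly linked list rather than the kernel's list helpers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct radio {
	struct radio *next;
	int owner;
};

static struct radio *radios;	/* global list, protected by the lock */
static pthread_mutex_t radios_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink matching entries onto a private list under the lock, then do
 * the potentially slow teardown after dropping it, so nothing heavy
 * ever runs while other threads spin on radios_lock. */
static void remove_radios(int owner)
{
	struct radio **pp = &radios, *doomed = NULL;

	pthread_mutex_lock(&radios_lock);
	while (*pp) {
		struct radio *r = *pp;

		if (r->owner == owner) {
			*pp = r->next;		/* unlink */
			r->next = doomed;	/* move to private list */
			doomed = r;
		} else {
			pp = &r->next;
		}
	}
	pthread_mutex_unlock(&radios_lock);

	while (doomed) {			/* teardown, lock dropped */
		struct radio *r = doomed;

		doomed = r->next;
		printf("destroying radio of owner %d\n", r->owner);
		free(r);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct radio *r = malloc(sizeof(*r));

		r->owner = i % 2;
		r->next = radios;
		radios = r;
	}
	remove_radios(1);
	while (radios) {			/* cleanup */
		struct radio *r = radios;

		radios = r->next;
		free(r);
	}
	return 0;
}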
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
{
struct mt76x0_dev *dev = hw->priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
unsigned int wcid = mvif->group_wcid.idx;

dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
dev->vif_mask &= ~BIT(mvif->idx);
}

static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
struct xenvif_hash {
unsigned int alg;
u32 flags;
bool mapping_sel;
u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
unsigned int size;
struct xenvif_hash_cache cache;
};

@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

vif->hash.size = size;
memset(vif->hash.mapping, 0, sizeof(u32) * size);
memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
sizeof(u32) * size);

return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

@@ -332,30 +333,48 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
u32 *mapping = &vif->hash.mapping[off];
struct gnttab_copy copy_op = {
u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
unsigned int nr = 1;
struct gnttab_copy copy_op[2] = {{
.source.u.ref = gref,
.source.domid = vif->domid,
.dest.u.gmfn = virt_to_gfn(mapping),
.dest.domid = DOMID_SELF,
.dest.offset = xen_offset_in_page(mapping),
.len = len * sizeof(u32),
.len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
};
}};

if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
if ((off + len < off) || (off + len > vif->hash.size) ||
len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
copy_op[1] = copy_op[0];
copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
copy_op[1].dest.offset = 0;
copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
copy_op[0].len = copy_op[1].source.offset;
nr = 2;
}

memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
vif->hash.size * sizeof(*mapping));

if (copy_op[0].len != 0) {
gnttab_batch_copy(copy_op, nr);

if (copy_op[0].status != GNTST_okay ||
copy_op[nr - 1].status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}

while (len-- != 0)
if (mapping[off++] >= vif->num_queues)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

if (copy_op.len != 0) {
gnttab_batch_copy(&copy_op, 1);

if (copy_op.status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
vif->hash.mapping_sel = !vif->hash.mapping_sel;

return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
}

if (vif->hash.size != 0) {
const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

seq_puts(m, "\nHash Mapping:\n");

for (i = 0; i < vif->hash.size; ) {

@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

for (j = 0; j < n; j++, i++)
seq_printf(m, "%4u ", vif->hash.mapping[i]);
seq_printf(m, "%4u ", mapping[i]);

seq_puts(m, "\n");
}

@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
if (size == 0)
return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

return vif->hash.mapping[skb_get_hash_raw(skb) % size];
return vif->hash.mapping[vif->hash.mapping_sel]
[skb_get_hash_raw(skb) % size];
}

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
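
The xen-netback fix turns the single hash-mapping table into two copies selected by mapping_sel: the update builds and validates the inactive copy, and only then flips the selector, so xenvif_select_queue() never reads a half-written table. A simplified sketch of the double-buffer-and-flip idea (single writer assumed; none of these names are the Xen API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAP_SIZE 8

/* Two copies of the hash-to-queue table; 'sel' picks the live one.
 * A writer fills and validates the inactive copy, then flips 'sel',
 * so a concurrent reader never sees a half-updated table. */
struct hash_map {
	bool sel;
	uint32_t map[2][MAP_SIZE];
	unsigned int num_queues;
};

static int hash_map_update(struct hash_map *h, const uint32_t *entries,
			   unsigned int off, unsigned int len)
{
	uint32_t *shadow = h->map[!h->sel];
	unsigned int i;

	if (off + len < off || off + len > MAP_SIZE)	/* overflow + bounds */
		return -1;

	/* Start from the live table, then apply the updated slice. */
	memcpy(shadow, h->map[h->sel], sizeof(h->map[0]));
	memcpy(shadow + off, entries, len * sizeof(*entries));

	/* Validate the whole shadow copy before publishing it. */
	for (i = 0; i < MAP_SIZE; i++)
		if (shadow[i] >= h->num_queues)
			return -1;

	h->sel = !h->sel;	/* publish; single writer assumed */
	return 0;
}

int main(void)
{
	struct hash_map h = { .num_queues = 4 };
	uint32_t slice[2] = { 1, 3 };
	int rc = hash_map_update(&h, slice, 2, 2);

	printf("update: %d, live[2] = %u\n", rc, h.map[h.sel][2]);
	return 0;
}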
@@ -610,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
char *ipa_name;
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)

@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);

struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
char *msg;
const char *msg;
};

static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},

@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {



char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x = 0;
qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
sizeof(struct ipa_rc_msg) - 1].rc = rc;
while (qeth_ipa_rc_msg[x].rc != rc)
x++;
int x;

for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
if (qeth_ipa_rc_msg[x].rc == rc)
return qeth_ipa_rc_msg[x].msg;
return qeth_ipa_rc_msg[x].msg;
}


struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
char *name;
const char *name;
};

static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},

@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_UNKNOWN, "unknown"},
};

char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
int x = 0;
qeth_ipa_cmd_names[
sizeof(qeth_ipa_cmd_names) /
sizeof(struct ipa_cmd_names)-1].cmd = cmd;
while (qeth_ipa_cmd_names[x].cmd != cmd)
x++;
int x;

for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
if (qeth_ipa_cmd_names[x].cmd == cmd)
return qeth_ipa_cmd_names[x].name;
return qeth_ipa_cmd_names[x].name;
}

@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};

extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);

#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setassparms_hdr))
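
The qeth rewrite exists because the old lookup wrote the searched value into the table's last slot as a sentinel, which is impossible once the tables become const; the replacement bounds the scan with ARRAY_SIZE and falls back to the final catch-all entry. A small standalone illustration of the const-safe variant (hypothetical table contents):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct rc_msg {
	int rc;
	const char *msg;
};

/* Hypothetical table contents; only the shape matters. With a const
 * table, the old trick of writing the searched value into the last
 * slot as a sentinel is no longer possible. */
static const struct rc_msg rc_msgs[] = {
	{ 0, "success" },
	{ 1, "not supported" },
	{ -1, "unknown return code" },	/* catch-all, must stay last */
};

static const char *rc_to_msg(int rc)
{
	size_t i;

	/* Bound the scan instead of relying on a sentinel ... */
	for (i = 0; i < ARRAY_SIZE(rc_msgs) - 1; i++)
		if (rc_msgs[i].rc == rc)
			return rc_msgs[i].msg;
	/* ... and fall back to the final entry (i is the last index). */
	return rc_msgs[i].msg;
}

int main(void)
{
	printf("%s\n%s\n", rc_to_msg(1), rc_to_msg(42));
	return 0;
}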
@@ -90,6 +90,8 @@ struct mlx5_hairpin {

u32 *rqn;
u32 *sqn;

bool peer_gone;
};

struct mlx5_hairpin *

@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
* switch driver and used to set the phys state of the
* switch port.
*
* @wol_enabled: Wake-on-LAN is enabled
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/

@@ -2014,6 +2016,7 @@ struct net_device {
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
unsigned wol_enabled:1;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
break;
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;

@@ -139,12 +139,6 @@ struct bond_parm_tbl {
int mode;
};

struct netdev_notify_work {
struct delayed_work work;
struct net_device *dev;
struct netdev_bonding_info bonding_info;
};

struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */

@@ -172,6 +166,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};

@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
* @freq: the freqency(in MHz) to be queried.
* @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
* irrelevant). This can be used later for deduplication.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query

@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}

static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
{
return rcu_dereference_check(ireq->ireq_opt,
refcount_read(&ireq->req.rsk_refcnt) > 0);
}

struct inet_cork {
unsigned int flags;
__be32 addr;

@@ -153,7 +153,7 @@
* nla_find() find attribute in stream of attributes
* nla_find_nested() find attribute in nested attributes
* nla_parse() parse and validate stream of attrs
* nla_parse_nested() parse nested attribuets
* nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
* nla_for_each_nested() loop over the nested attributes
*=========================================================================

@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
rxrpc_peer_new,
rxrpc_peer_processing,
rxrpc_peer_put,
rxrpc_peer_queued_error,
};

enum rxrpc_conn_trace {

@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
EM(rxrpc_peer_got, "GOT") \
EM(rxrpc_peer_new, "NEW") \
EM(rxrpc_peer_processing, "PRO") \
EM(rxrpc_peer_put, "PUT") \
E_(rxrpc_peer_queued_error, "QER")
E_(rxrpc_peer_put, "PUT")

#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* LE address type */
addr_type = le_addr_type(cp->addr.type);

hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,

@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto done;
}

/* Abort any ongoing SMP pairing */
smp_cancel_pairing(conn);

/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.

@@ -2422,30 +2422,51 @@ unlock:
return ret;
}

void smp_cancel_pairing(struct hci_conn *hcon)
int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct hci_conn *hcon;
struct l2cap_conn *conn;
struct l2cap_chan *chan;
struct smp_chan *smp;
int err;

err = hci_remove_ltk(hdev, bdaddr, addr_type);
hci_remove_irk(hdev, bdaddr, addr_type);

hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
if (!hcon)
goto done;

conn = hcon->l2cap_data;
if (!conn)
return;
goto done;

chan = conn->smp;
if (!chan)
return;
goto done;

l2cap_chan_lock(chan);

smp = chan->data;
if (smp) {
/* Set keys to NULL to make sure smp_failure() does not try to
* remove and free already invalidated rcu list entries. */
smp->ltk = NULL;
smp->slave_ltk = NULL;
smp->remote_irk = NULL;

if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
err = 0;
}

l2cap_chan_unlock(chan);

done:
return err;
}

static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)

@@ -181,7 +181,8 @@ enum smp_key_pref {
};

/* SMP Commands */
void smp_cancel_pairing(struct hci_conn *hcon);
int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
!netif_is_l3_master(skb->dev)) {
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}

@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
{
struct ethtool_wolinfo wol;
int ret;

if (!dev->ethtool_ops->set_wol)
return -EOPNOTSUPP;

@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
if (copy_from_user(&wol, useraddr, sizeof(wol)))
return -EFAULT;

return dev->ethtool_ops->set_wol(dev, &wol);
ret = dev->ethtool_ops->set_wol(dev, &wol);
if (ret)
return ret;

dev->wol_enabled = !!wol.wolopts;

return 0;
}

static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)

@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
}
}

/*
* Check whether delayed processing was scheduled for our NIC. If so,
* we attempt to grab the poll lock and use ->poll() to pump the card.
* If this fails, either we've recursed in ->poll() or it's already
* running on another CPU.
*
* Note: we don't mask interrupts with this lock because we're using
* trylock here and interrupts are already disabled in the softirq
* case. Further, we test the poll_owner to avoid recursion on UP
* systems where the lock doesn't exist.
*/
static void poll_one_napi(struct napi_struct *napi)
{
int work = 0;

/* net_rx_action's ->poll() invocations and our's are
* synchronized by this test which is only made while
* holding the napi->poll_lock.
*/
if (!test_bit(NAPI_STATE_SCHED, &napi->state))
return;
int work;

/* If we set this bit but see that it has already been set,
* that indicates that napi has been disabled and we need

@@ -330,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
/* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo;

rcu_read_lock_bh();
lockdep_assert_irqs_disabled();

npinfo = rcu_dereference_bh(np->dev->npinfo);

@@ -374,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
rcu_read_unlock_bh();
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
@@ -1898,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(skb->sk, netnsid);
if (IS_ERR(tgt_net)) {
tgt_net = net;
netnsid = -1;
}
if (IS_ERR(tgt_net))
return PTR_ERR(tgt_net);
}

if (tb[IFLA_EXT_MASK])

@@ -2837,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();

if (num_tx_queues < 1 || num_tx_queues > 4096)
return ERR_PTR(-EINVAL);

if (num_rx_queues < 1 || num_rx_queues > 4096)
return ERR_PTR(-EINVAL);

dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
if (!dev)

@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
/* It is possible that we process SYN packets from backlog,
* so we need to make sure to disable BH right there.
* so we need to make sure to disable BH and RCU right there.
*/
rcu_read_lock();
local_bh_disable();
acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
rcu_read_unlock();
if (!acceptable)
return 1;
consume_skb(skb);

@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req

dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
ireq->ir_rmt_addr);
rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
ireq_opt_deref(ireq));
rcu_dereference(ireq->ireq_opt));
rcu_read_unlock();
err = net_xmit_eval(err);
}

@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct ip_options_rcu *opt;
struct rtable *rt;

opt = ireq_opt_deref(ireq);
rcu_read_lock();
opt = rcu_dereference(ireq->ireq_opt);

flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,

@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
rcu_read_unlock();
return &rt->dst;

route_err:
ip_rt_put(rt);
no_route:
rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}

@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
__be16 *ports;
int end;

@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
ports = (__be16 *)skb_transport_header(skb);

sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
static u32 u32_max_div_HZ = UINT_MAX / HZ;

/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;

@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
{
.procname = "tcp_probe_interval",
.data = &init_net.ipv4.sysctl_tcp_probe_interval,
.maxlen = sizeof(int),
.maxlen = sizeof(u32),
.mode = 0644,
.proc_handler = proc_dointvec,
.proc_handler = proc_douintvec_minmax,
.extra2 = &u32_max_div_HZ,
},
{
.procname = "igmp_link_local_mcast_reports",

@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (th->fin)
goto discard;
/* It is possible that we process SYN packets from backlog,
* so we need to make sure to disable BH right there.
* so we need to make sure to disable BH and RCU right there.
*/
rcu_read_lock();
local_bh_disable();
acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
rcu_read_unlock();

if (!acceptable)
return 1;

@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
ireq_opt_deref(ireq));
rcu_dereference(ireq->ireq_opt));
rcu_read_unlock();
err = net_xmit_eval(err);
}

@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)

if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
skb_reset_transport_header(skb);
return 0;
}

@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
struct xfrm_offload *xo = xfrm_offload(skb);

if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),

@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
if (!xo || !(xo->flags & XFRM_GRO))
skb_reset_transport_header(skb);
skb_reset_transport_header(skb);
return 0;
}
@@ -4321,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
if (!nh)
return -ENOMEM;
nh->fib6_info = rt;
err = ip6_convert_metrics(net, rt, r_cfg);
if (err) {
kfree(nh);
return err;
}
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
list_add_tail(&nh->next, rt6_nh_list);

@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)

if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
skb_reset_transport_header(skb);
return -1;
}

@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
struct xfrm_offload *xo = xfrm_offload(skb);

if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),

@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
sizeof(struct ipv6hdr));
if (!xo || !(xo->flags & XFRM_GRO))
skb_reset_transport_header(skb);
skb_reset_transport_header(skb);
return 0;
}

@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)

if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
} else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
kfree_skb(skb);
return -EMSGSIZE;
}
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,

if (local->ops->wake_tx_queue &&
type != NL80211_IFTYPE_AP_VLAN &&
type != NL80211_IFTYPE_MONITOR)
(type != NL80211_IFTYPE_MONITOR ||
(params->flags & MONITOR_FLAG_ACTIVE)))
txq_size += sizeof(struct txq_info) +
local->hw.txq_data_size;

@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *sta, struct sk_buff *skb);
struct sta_info *sta,
struct ieee80211_tx_status *st);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);

@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
}

void ieee80211s_update_metric(struct ieee80211_local *local,
struct sta_info *sta, struct sk_buff *skb)
struct sta_info *sta,
struct ieee80211_tx_status *st)
{
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *txinfo = st->info;
int failed;

if (!ieee80211_is_data(hdr->frame_control))
return;

failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

/* moving average, scaled to 100.

@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
if (!skb)
return;

if (dropped) {
dev_kfree_skb_any(skb);
return;
}

if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;

@@ -506,6 +501,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
}
rcu_read_unlock();

dev_kfree_skb_any(skb);
} else if (dropped) {
dev_kfree_skb_any(skb);
} else {
/* consumes skb */

@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,

rate_control_tx_status(local, sband, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, skb);
ieee80211s_update_metric(local, sta, status);

if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);

@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}

rate_control_tx_status(local, sband, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, status);
}

if (acked || noack_success) {
@@ -16,6 +16,7 @@
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "wme.h"

/* give usermode some time for retries in setting up the TDLS session */
#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)

@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
skb_set_queue_mapping(skb, IEEE80211_AC_BK);
skb->priority = 2;
skb->priority = 256 + 2;
break;
default:
skb_set_queue_mapping(skb, IEEE80211_AC_VI);
skb->priority = 5;
skb->priority = 256 + 5;
break;
}
skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));

/*
* Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.

@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

/* driver doesn't support power save */
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))

@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
return TX_CONTINUE;

if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
return TX_CONTINUE;

ifmgd = &tx->sdata->u.mgd;

/*

@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

if (invoke_tx_handlers_early(&tx))
return false;
return true;

if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
return true;

@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))

static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{