diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c59ec3ddaa66..3cd397d60434 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5204,7 +5204,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 	if (t4_wait_dev_ready(adap) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
-	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
+	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
 		return PCI_ERS_RESULT_DISCONNECT;
 	adap->flags |= FW_OK;
 	if (adap_init1(adap, &c))
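The cxgb4 hunk relies on a common kernel convention: t4_fw_hello() returns a non-negative value on success (the mailbox that owns the device) and a negative errno on failure, so testing the result for truthiness turns every successful handshake on a non-zero mailbox into a spurious disconnect. A minimal user-space sketch of the pattern; fw_hello_sketch() is a hypothetical stand-in, not the driver's API:

#include <stdio.h>

/* Stand-in for t4_fw_hello(): non-negative mailbox index on success,
 * negative errno on failure (assumed convention, mirroring the patch).
 */
static int fw_hello_sketch(void)
{
	return 4; /* success: firmware picked mailbox 4 as master */
}

int main(void)
{
	int ret = fw_hello_sketch();

	if (ret)	/* old check: wrongly treats mailbox 4 as an error */
		printf("truthiness check: spurious failure (ret=%d)\n", ret);
	if (ret < 0)	/* fixed check: only negative values are errors */
		printf("signed check: failure\n");
	else
		printf("signed check: success, master mailbox %d\n", ret);
	return 0;
}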
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 234ce6f07544..f544b297c9ab 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -327,6 +327,7 @@ enum vf_state {
 #define BE_FLAGS_LINK_STATUS_INIT		1
 #define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
+#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 25d3290b8cac..e1e5bb9d9054 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -961,19 +961,8 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 			OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
-	if (lancer_chip(adapter)) {
-		req->hdr.version = 2;
-		req->page_size = 1; /* 1 for 4K */
-		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
-								no_delay);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
-						__ilog2_u32(cq->len/256));
-		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
-								ctxt, 1);
-		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
-								ctxt, eq->id);
-	} else {
+
+	if (BEx_chip(adapter)) {
 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
 								coalesce_wm);
 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
@@ -983,6 +972,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+	} else {
+		req->hdr.version = 2;
+		req->page_size = 1; /* 1 for 4K */
+		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
+								no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
+								ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
+								ctxt, eq->id);
 	}
 
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1763,10 +1764,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 	req->if_id = cpu_to_le32(adapter->if_handle);
 	if (flags & IFF_PROMISC) {
 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-					BE_IF_FLAGS_VLAN_PROMISCUOUS);
+					BE_IF_FLAGS_VLAN_PROMISCUOUS |
+					BE_IF_FLAGS_MCAST_PROMISCUOUS);
 		if (value == ON)
 			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
-					BE_IF_FLAGS_VLAN_PROMISCUOUS);
+					BE_IF_FLAGS_VLAN_PROMISCUOUS |
+					BE_IF_FLAGS_MCAST_PROMISCUOUS);
 	} else if (flags & IFF_ALLMULTI) {
 		req->if_flags_mask = req->if_flags =
 				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -2084,7 +2087,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->flash_compl,
-					 msecs_to_jiffies(30000)))
+					 msecs_to_jiffies(60000)))
 		status = -1;
 	else
 		status = adapter->flash_status;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a855668e0cc5..025bdb0d1764 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -381,7 +381,7 @@ struct amap_cq_context_be {
 	u8 rsvd5[32];		/* dword 3*/
 } __packed;
 
-struct amap_cq_context_lancer {
+struct amap_cq_context_v2 {
 	u8 rsvd0[12];		/* dword 0*/
 	u8 coalescwm[2];	/* dword 0*/
 	u8 nodelay;		/* dword 0*/
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 5733cde88e2c..3d4461adb3b4 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -85,6 +85,7 @@ static const struct be_ethtool_stat et_stats[] = {
 	{DRVSTAT_INFO(tx_pauseframes)},
 	{DRVSTAT_INFO(tx_controlframes)},
 	{DRVSTAT_INFO(rx_priority_pause_frames)},
+	{DRVSTAT_INFO(tx_priority_pauseframes)},
 	/* Received packets dropped when an internal fifo going into
 	 * main packet buffer tank (PMEM) overflows.
 	 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4babc8a4a543..6c52a60dcdb7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -410,6 +410,7 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
 	drvs->tx_controlframes = port_stats->tx_controlframes;
+	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
 	drvs->jabber_events = port_stats->jabber_events;
 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
@@ -471,11 +472,26 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
 	ACCESS_ONCE(*acc) = newacc;
 }
 
+void populate_erx_stats(struct be_adapter *adapter,
+			struct be_rx_obj *rxo,
+			u32 erx_stat)
+{
+	if (!BEx_chip(adapter))
+		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
+	else
+		/* below erx HW counter can actually wrap around after
+		 * 65535. Driver accumulates a 32-bit value
+		 */
+		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
+				     (u16)erx_stat);
+}
+
 void be_parse_stats(struct be_adapter *adapter)
 {
 	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
 	struct be_rx_obj *rxo;
 	int i;
+	u32 erx_stat;
 
 	if (lancer_chip(adapter)) {
 		populate_lancer_stats(adapter);
@@ -488,12 +504,8 @@ void be_parse_stats(struct be_adapter *adapter)
 
 		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
 		for_all_rx_queues(adapter, rxo, i) {
-			/* below erx HW counter can actually wrap around after
-			 * 65535. Driver accumulates a 32-bit value
-			 */
-			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
-					     (u16)erx->rx_drops_no_fragments \
-					     [rxo->q.id]);
+			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
+			populate_erx_stats(adapter, rxo, erx_stat);
 		}
 	}
 }
@@ -2378,7 +2390,7 @@ static uint be_num_rss_want(struct be_adapter *adapter)
 	return num;
 }
 
-static void be_msix_enable(struct be_adapter *adapter)
+static int be_msix_enable(struct be_adapter *adapter)
 {
 #define BE_MIN_MSIX_VECTORS		1
 	int i, status, num_vec, num_roce_vec = 0;
@@ -2403,13 +2415,17 @@ static void be_msix_enable(struct be_adapter *adapter)
 		goto done;
 	} else if (status >= BE_MIN_MSIX_VECTORS) {
 		num_vec = status;
-		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				num_vec) == 0)
+		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+					 num_vec);
+		if (!status)
 			goto done;
 	}
 
 	dev_warn(dev, "MSIx enable failed\n");
-	return;
+
+	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
+	if (!be_physfn(adapter))
+		return status;
+	return 0;
 done:
 	if (be_roce_supported(adapter)) {
 		if (num_vec > num_roce_vec) {
@@ -2423,7 +2439,7 @@ done:
 	} else
 		adapter->num_msix_vec = num_vec;
 	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
-	return;
+	return 0;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2536,8 +2552,11 @@ static int be_close(struct net_device *netdev)
 
 	be_roce_dev_close(adapter);
 
-	for_all_evt_queues(adapter, eqo, i)
-		napi_disable(&eqo->napi);
+	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
+		for_all_evt_queues(adapter, eqo, i)
+			napi_disable(&eqo->napi);
+		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
+	}
 
 	be_async_mcc_disable(adapter);
@@ -2631,7 +2650,9 @@ static int be_open(struct net_device *netdev)
 	if (status)
 		goto err;
 
-	be_irq_register(adapter);
+	status = be_irq_register(adapter);
+	if (status)
+		goto err;
 
 	for_all_rx_queues(adapter, rxo, i)
 		be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2645,6 +2666,7 @@ static int be_open(struct net_device *netdev)
 		napi_enable(&eqo->napi);
 		be_eq_notify(adapter, eqo->q.id, true, false, 0);
 	}
+	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
 	if (!status)
@@ -3100,7 +3122,9 @@ static int be_setup(struct be_adapter *adapter)
 	if (status)
 		goto err;
 
-	be_msix_enable(adapter);
+	status = be_msix_enable(adapter);
+	if (status)
+		goto err;
 
 	status = be_evt_queues_create(adapter);
 	if (status)
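On BEx chips, populate_erx_stats() feeds a hardware counter that wraps at 65535 into accumulate_16bit_val(), which keeps a 32-bit running value by adding 65536 whenever the new 16-bit reading is smaller than the previous one. A self-contained sketch of that technique (a user-space rewrite, not the driver code; it assumes the counter cannot wrap more than once between reads):

#include <stdint.h>
#include <stdio.h>

static void accumulate_16bit(uint32_t *acc, uint16_t val)
{
	uint32_t lo = *acc & 0xFFFF;		/* previous 16-bit reading */
	uint32_t hi = *acc & 0xFFFF0000;	/* 65536 * number of wraps */

	if (val < lo)				/* reading went down: wrapped */
		hi += 0x10000;
	*acc = hi + val;
}

int main(void)
{
	uint32_t acc = 0;
	uint16_t samples[] = { 100, 65000, 300 };	/* 300 < 65000: a wrap */

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		accumulate_16bit(&acc, samples[i]);
		printf("reading %5u -> accumulated %u\n", samples[i], acc);
	}
	return 0;
}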
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 256ae789c143..d175bbd3ffd3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2496,10 +2496,12 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		skb->ip_summed = re->skb->ip_summed;
 		skb->csum = re->skb->csum;
 		skb->rxhash = re->skb->rxhash;
+		skb->vlan_proto = re->skb->vlan_proto;
 		skb->vlan_tci = re->skb->vlan_tci;
 
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
+		re->skb->vlan_proto = 0;
 		re->skb->vlan_tci = 0;
 		re->skb->rxhash = 0;
 		re->skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 59c43918883e..21a5b291b4b3 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -555,8 +555,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
 		cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 		prim_cpsw = cpsw_get_slave_priv(priv, 0);
 		if (prim_cpsw->irq_enabled == false) {
-			cpsw_enable_irq(priv);
 			prim_cpsw->irq_enabled = true;
+			cpsw_enable_irq(priv);
 		}
 	}
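The cpsw hunk is a pure ordering fix: if the hardware interrupt is unmasked before irq_enabled is set, an IRQ taken in the window between the two statements sees stale state. Publishing the flag first closes the window. A compact sketch of the shape (the names are illustrative, not the driver's real API):

#include <stdio.h>

struct slave_state {
	int irq_enabled;	/* bookkeeping consulted by the IRQ handler */
};

static void hw_enable_irq(void)
{
	puts("rx irq unmasked");	/* stand-in for cpsw_enable_irq() */
}

static void reenable_rx_irq(struct slave_state *s)
{
	s->irq_enabled = 1;	/* publish the flag first... */
	hw_enable_irq();	/* ...so a handler firing now sees it set */
}

int main(void)
{
	struct slave_state s = { 0 };

	reenable_rx_irq(&s);
	return 0;
}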
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f7f623a5390e..577c72d5f369 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -100,6 +100,9 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
 			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
 				   rx->size);
 			kfree_skb(rx->ax_skb);
+			rx->ax_skb = NULL;
+			rx->size = 0U;
+
 			return 0;
 		}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 09699054b54f..03e8a15d7deb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -256,8 +256,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
 	pegasus_t *pegasus = netdev_priv(dev);
+	u16 data = val;
 
-	write_mii_word(pegasus, phy_id, loc, (__u16 *)&val);
+	write_mii_word(pegasus, phy_id, loc, &data);
 }
 
 static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5a88e72090ce..834e405fb57a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -548,6 +548,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x19d2, 0x0265, 4)},	/* ONDA MT8205 4G LTE */
 	{QMI_FIXED_INTF(0x19d2, 0x0284, 4)},	/* ZTE MF880 */
 	{QMI_FIXED_INTF(0x19d2, 0x0326, 4)},	/* ZTE MF821D */
+	{QMI_FIXED_INTF(0x19d2, 0x0412, 4)},	/* Telewell TW-LTE 4G */
 	{QMI_FIXED_INTF(0x19d2, 0x1008, 4)},	/* ZTE (Vodafone) K3570-Z */
 	{QMI_FIXED_INTF(0x19d2, 0x1010, 4)},	/* ZTE (Vodafone) K3571-Z */
 	{QMI_FIXED_INTF(0x19d2, 0x1012, 4)},
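The pegasus.c change above fixes an endianness bug rather than a style issue: casting &val (an int) to __u16 * reads the first two bytes of the integer, which on big-endian machines is the high half, so the PHY would be written zeros for small values. Copying into a genuine 16-bit local is portable. A standalone demonstration (memcpy is used to show the byte-level behavior without aliasing violations):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int val = 0x1234;
	uint16_t first_two_bytes;
	uint16_t data = (uint16_t)val;	/* the fix: a real 16-bit copy */

	/* What "(__u16 *)&val" dereferences: the first two bytes of val. */
	memcpy(&first_two_bytes, &val, sizeof(first_two_bytes));

	printf("first two bytes: 0x%04x (would be 0x0000 on big-endian)\n",
	       first_two_bytes);
	printf("16-bit copy:     0x%04x on any endianness\n", data);
	return 0;
}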
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index a2865f17c667..37984e6d4e99 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -51,9 +51,17 @@
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
  */
-#define MAX_SKB_SLOTS_DEFAULT 20
-static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
-module_param(max_skb_slots, uint, 0444);
+#define FATAL_SKB_SLOTS_DEFAULT 20
+static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
+module_param(fatal_skb_slots, uint, 0444);
+
+/*
+ * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
+ * the maximum slots a valid packet can use. Now this value is defined
+ * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
+ * all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -928,18 +936,20 @@ static void netbk_fatal_tx_err(struct xenvif *vif)
 
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
-				RING_IDX first_idx,
 				struct xen_netif_tx_request *txp,
 				int work_to_do)
 {
 	RING_IDX cons = vif->tx.req_cons;
 	int slots = 0;
 	int drop_err = 0;
+	int more_data;
 
 	if (!(first->flags & XEN_NETTXF_more_data))
 		return 0;
 
 	do {
+		struct xen_netif_tx_request dropped_tx = { 0 };
+
 		if (slots >= work_to_do) {
 			netdev_err(vif->dev,
 				   "Asked for %d slots but exceeds this limit\n",
@@ -951,28 +961,32 @@ static int netbk_count_requests(struct xenvif *vif,
 		/* This guest is really using too many slots and
 		 * considered malicious.
 		 */
-		if (unlikely(slots >= max_skb_slots)) {
+		if (unlikely(slots >= fatal_skb_slots)) {
 			netdev_err(vif->dev,
 				   "Malicious frontend using %d slots, threshold %u\n",
-				   slots, max_skb_slots);
+				   slots, fatal_skb_slots);
 			netbk_fatal_tx_err(vif);
 			return -E2BIG;
 		}
 
 		/* Xen network protocol had implicit dependency on
-		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
-		 * historical MAX_SKB_FRAGS value 18 to honor the same
-		 * behavior as before. Any packet using more than 18
-		 * slots but less than max_skb_slots slots is dropped
+		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
+		 * the historical MAX_SKB_FRAGS value 18 to honor the
+		 * same behavior as before. Any packet using more than
+		 * 18 slots but less than fatal_skb_slots slots is
+		 * dropped
 		 */
-		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
+		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
 			if (net_ratelimit())
 				netdev_dbg(vif->dev,
 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
-					   slots, XEN_NETIF_NR_SLOTS_MIN);
+					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
 			drop_err = -E2BIG;
 		}
 
+		if (drop_err)
+			txp = &dropped_tx;
+
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
 		       sizeof(*txp));
@@ -1002,10 +1016,16 @@ static int netbk_count_requests(struct xenvif *vif,
 			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
-	} while ((txp++)->flags & XEN_NETTXF_more_data);
+
+		more_data = txp->flags & XEN_NETTXF_more_data;
+
+		if (!drop_err)
+			txp++;
+
+	} while (more_data);
 
 	if (drop_err) {
-		netbk_tx_err(vif, first, first_idx + slots);
+		netbk_tx_err(vif, first, cons + slots);
 		return drop_err;
 	}
@@ -1042,7 +1062,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 	struct pending_tx_info *first = NULL;
 
 	/* At this point shinfo->nr_frags is in fact the number of
-	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
+	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 	 */
 	nr_slots = shinfo->nr_frags;
@@ -1404,12 +1424,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 	struct sk_buff *skb;
 	int ret;
 
-	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 		< MAX_PENDING_REQS) &&
 		!list_empty(&netbk->net_schedule_list)) {
 		struct xenvif *vif;
 		struct xen_netif_tx_request txreq;
-		struct xen_netif_tx_request txfrags[max_skb_slots];
+		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct page *page;
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
 		u16 pending_idx;
@@ -1470,8 +1490,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			continue;
 		}
 
-		ret = netbk_count_requests(vif, &txreq, idx,
-					   txfrags, work_to_do);
+		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			continue;
@@ -1498,7 +1517,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		pending_idx = netbk->pending_ring[index];
 
 		data_len = (txreq.size > PKT_PROT_LEN &&
-			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
+			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
 			PKT_PROT_LEN : txreq.size;
 
 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
@@ -1777,7 +1796,7 @@ static inline int rx_work_todo(struct xen_netbk *netbk)
 
 static inline int tx_work_todo(struct xen_netbk *netbk)
 {
-	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
+	if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
 	     < MAX_PENDING_REQS) &&
 	     !list_empty(&netbk->net_schedule_list))
 		return 1;
@@ -1862,11 +1881,11 @@ static int __init netback_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
-	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
+	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
 		printk(KERN_INFO
-		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
-		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
-		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
+		       "xen-netback: fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+		       fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
+		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
 	}
 
 	xen_netbk_group_nr = num_online_cpus();
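netbk_count_requests() now distinguishes two thresholds while it walks a chain of tx requests: a packet that needs more than XEN_NETBK_LEGACY_SLOTS_MAX (18) slots is merely dropped, with the remaining requests still consumed into a throwaway dropped_tx target so txfrags[] cannot overflow, while one that reaches fatal_skb_slots marks the frontend as malicious. A condensed sketch of the per-iteration checks (the constants mirror the patch; the function itself is illustrative, not the kernel code):

#include <stdio.h>

#define LEGACY_SLOTS_MAX 18	/* XEN_NETBK_LEGACY_SLOTS_MAX */

static unsigned int fatal_skb_slots = 20;	/* module parameter default */

enum verdict { KEEP_COUNTING, DROP, FATAL };

static enum verdict check_slots(int slots)
{
	if (slots >= (int)fatal_skb_slots)
		return FATAL;		/* netbk_fatal_tx_err(), -E2BIG */
	if (slots >= LEGACY_SLOTS_MAX)
		return DROP;		/* keep consuming, drop the packet */
	return KEEP_COUNTING;
}

int main(void)
{
	int tests[] = { 4, 17, 18, 19, 20, 32 };
	unsigned i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%2d follow-on slots -> verdict %d\n",
		       tests[i], check_slots(tests[i]));
	return 0;
}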
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 8af508536d36..3a8c8fd63c88 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -628,7 +628,7 @@ static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-	u32 old_features = features;
+	netdev_features_t old_features = features;
 
 	features &= real_dev->vlan_features;
 	features |= NETIF_F_RXCSUM;
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c3530a81a33b..950663d4d330 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -107,7 +107,7 @@ static void br_tcn_timer_expired(unsigned long arg)
 
 	br_debug(br, "tcn timer expired\n");
 	spin_lock(&br->lock);
-	if (br->dev->flags & IFF_UP) {
+	if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) {
 		br_transmit_tcn(br);
 		mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
diff --git a/net/core/dev.c b/net/core/dev.c
index 4040673f806a..40b1fadaf637 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2456,7 +2456,7 @@ EXPORT_SYMBOL(netif_skb_features);
  *	2. skb is fragmented and the device does not support SG.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      int features)
+				      netdev_features_t features)
 {
 	return skb_is_nonlinear(skb) &&
 	       ((skb_has_frag_list(skb) &&
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 5a934ef90f8b..22efdaa76ebf 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1421,7 +1421,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
-	u32 old_features;
+	netdev_features_t old_features;
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c61b3bb87a16..d01be2a3ae53 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1293,6 +1293,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 		       SKB_GSO_DODGY |
 		       SKB_GSO_TCP_ECN |
 		       SKB_GSO_GRE |
+		       SKB_GSO_TCPV6 |
 		       SKB_GSO_UDP_TUNNEL |
 		       0)))
 		goto out;
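The vlan_dev.c, dev.c and ethtool.c hunks above all fix the same bug class: netdev_features_t is a 64-bit bitmask, so stashing it in a u32 (or passing it as int) silently discards any feature bit above bit 31. A two-line demonstration with a hypothetical high feature bit:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t netdev_features_t;	/* as in netdev_features.h */

int main(void)
{
	netdev_features_t features = 1ULL << 33;	/* hypothetical high bit */
	uint32_t truncated = (uint32_t)features;	/* the old u32 copy: 0 */
	netdev_features_t kept = features;		/* full-width copy */

	printf("u32 copy: %#x, netdev_features_t copy: %#llx\n",
	       (unsigned)truncated, (unsigned long long)kept);
	return 0;
}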
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index d2d5a99fba09..cc22363965d2 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -121,6 +121,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 	int ghl = GRE_HEADER_SECTION;
 	struct gre_base_hdr *greh;
 	int mac_len = skb->mac_len;
+	__be16 protocol = skb->protocol;
 	int tnl_hlen;
 	bool csum;
@@ -150,7 +151,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 
 	/* setup inner skb. */
 	if (greh->protocol == htons(ETH_P_TEB)) {
-		struct ethhdr *eth = eth_hdr(skb);
+		struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
 		skb->protocol = eth->h_proto;
 	} else {
 		skb->protocol = greh->protocol;
@@ -199,6 +200,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
 			skb_reset_mac_header(skb);
 			skb_set_network_header(skb, mac_len);
 			skb->mac_len = mac_len;
+			skb->protocol = protocol;
 		} while ((skb = skb->next));
 out:
 	return segs;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6abbe6455129..0ae038a4c7a8 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2311,8 +2311,10 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	int mac_len = skb->mac_len;
 	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-	int outer_hlen;
+	struct ethhdr *inner_eth = (struct ethhdr *)skb_inner_mac_header(skb);
+	__be16 protocol = skb->protocol;
 	netdev_features_t enc_features;
+	int outer_hlen;
 
 	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
 		goto out;
@@ -2322,6 +2324,8 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
+	inner_eth = (struct ethhdr *)skb_mac_header(skb);
+	skb->protocol = inner_eth->h_proto;
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
@@ -2358,6 +2362,7 @@ static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 		}
 
 		skb->ip_summed = CHECKSUM_NONE;
+		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
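The gre.c and udp.c hunks share one technique: before segmenting the encapsulated payload, the outer skb->protocol is saved and replaced by the inner Ethernet type, and after segmentation the saved value is restored on every segment while the outer headers are rebuilt; without the restore, segments leave carrying the inner protocol. A stripped-down sketch of that save/restore shape (struct pkt and segment_tunnel() are stand-ins, not kernel types):

#include <stdint.h>
#include <stdio.h>

typedef uint16_t be16;	/* stand-in for __be16; byte order ignored here */

struct pkt {
	be16 protocol;
	struct pkt *next;	/* segment list produced by segmentation */
};

static void segment_tunnel(struct pkt *skb, be16 inner_proto)
{
	be16 protocol = skb->protocol;	/* save the outer type */

	skb->protocol = inner_proto;	/* segment as the inner packet */
	/* ...inner segmentation would run here... */

	do {	/* rebuild outer headers on each resulting segment */
		skb->protocol = protocol;	/* restore the outer type */
	} while ((skb = skb->next));
}

int main(void)
{
	struct pkt seg2 = { 0, NULL };
	struct pkt seg1 = { 0x0800, &seg2 };	/* outer: ETH_P_IP */

	segment_tunnel(&seg1, 0x86DD);		/* inner: ETH_P_IPV6 */
	printf("segments leave as %#x and %#x\n",
	       seg1.protocol, seg2.protocol);
	return 0;
}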
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index dd5cd49b0e09..8ec1bca7f859 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -742,36 +742,33 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 
 	smp_rmb();
 
-	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+	/* We could have just memset this but we will lose the
+	 * flexibility of making the priv area sticky
+	 */
 
-		/* We could have just memset this but we will lose the
-		 * flexibility of making the priv area sticky
-		 */
-		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
-		BLOCK_NUM_PKTS(pbd1) = 0;
-		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		getnstimeofday(&ts);
-		h1->ts_first_pkt.ts_sec = ts.tv_sec;
-		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
-		pkc1->pkblk_start = (char *)pbd1;
-		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
-		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
-		pbd1->version = pkc1->version;
-		pkc1->prev = pkc1->nxt_offset;
-		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
-		prb_thaw_queue(pkc1);
-		_prb_refresh_rx_retire_blk_timer(pkc1);
+	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+	BLOCK_NUM_PKTS(pbd1) = 0;
+	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 
-		smp_wmb();
+	getnstimeofday(&ts);
 
-		return;
-	}
+	h1->ts_first_pkt.ts_sec = ts.tv_sec;
+	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 
-	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
-		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
-	dump_stack();
-	BUG();
+	pkc1->pkblk_start = (char *)pbd1;
+	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+
+	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+
+	pbd1->version = pkc1->version;
+	pkc1->prev = pkc1->nxt_offset;
+	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+
+	prb_thaw_queue(pkc1);
+	_prb_refresh_rx_retire_blk_timer(pkc1);
+
+	smp_wmb();
 }
 
 /*
@@ -862,10 +859,6 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 		prb_close_block(pkc, pbd, po, status);
 		return;
 	}
-
-	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
-	dump_stack();
-	BUG();
 }
 
 static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 25e159c2feb4..e5f3da507823 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -584,8 +584,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 {
 	int bp_index;
 
-	/*
-	 * Prepare broadcast link message for reliable transmission,
+	/* Prepare broadcast link message for reliable transmission,
 	 * if first time trying to send it;
 	 * preparation is skipped for broadcast link protocol messages
 	 * since they are sent in an unreliable manner and don't need it
@@ -611,30 +610,43 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
 		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
 		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
+		struct tipc_bearer *b = p;
+		struct sk_buff *tbuf;
 
 		if (!p)
-			break;	/* no more bearers to try */
+			break; /* No more bearers to try */
 
-		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
+		if (tipc_bearer_blocked(p)) {
+			if (!s || tipc_bearer_blocked(s))
+				continue; /* Can't use either bearer */
+			b = s;
+		}
+
+		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
+			       &bcbearer->remains_new);
 		if (bcbearer->remains_new.count == bcbearer->remains.count)
-			continue;	/* bearer pair doesn't add anything */
+			continue; /* Nothing added by bearer pair */
 
-		if (!tipc_bearer_blocked(p))
-			tipc_bearer_send(p, buf, &p->bcast_addr);
-		else if (s && !tipc_bearer_blocked(s))
-			/* unable to send on primary bearer */
-			tipc_bearer_send(s, buf, &s->bcast_addr);
-		else
-			/* unable to send on either bearer */
-			continue;
+		if (bp_index == 0) {
+			/* Use original buffer for first bearer */
+			tipc_bearer_send(b, buf, &b->bcast_addr);
+		} else {
+			/* Avoid concurrent buffer access */
+			tbuf = pskb_copy(buf, GFP_ATOMIC);
+			if (!tbuf)
+				break;
+			tipc_bearer_send(b, tbuf, &b->bcast_addr);
+			kfree_skb(tbuf); /* Bearer keeps a clone */
+		}
 
+		/* Swap bearers for next packet */
 		if (s) {
 			bcbearer->bpairs[bp_index].primary = s;
 			bcbearer->bpairs[bp_index].secondary = p;
 		}
 
 		if (bcbearer->remains_new.count == 0)
-			break;	/* all targets reached */
+			break; /* All targets reached */
 
 		bcbearer->remains = bcbearer->remains_new;
 	}
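The tipc_bcbearer_send() rework applies a standard fan-out rule: the first transmitter may use the caller's buffer directly, but every additional one gets a private copy (pskb_copy() in the patch, freed immediately since the bearer keeps a clone), so two bearers never manipulate the same buffer concurrently. A shape sketch with stand-in types, not the TIPC code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	char data[32];
};

static struct buf *buf_copy(const struct buf *b)	/* stands in for pskb_copy() */
{
	struct buf *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, b, sizeof(*c));
	return c;
}

static void bearer_send(int idx, const struct buf *b)
{
	printf("bearer %d sends \"%s\"\n", idx, b->data);
}

static void broadcast(struct buf *b, int nr_bearers)
{
	int i;

	for (i = 0; i < nr_bearers; i++) {
		if (i == 0) {
			bearer_send(i, b);	/* original buffer, first bearer */
		} else {
			struct buf *tb = buf_copy(b);	/* private copy */

			if (!tb)
				break;
			bearer_send(i, tb);
			free(tb);
		}
	}
}

int main(void)
{
	struct buf b;

	strcpy(b.data, "bcast msg");
	broadcast(&b, 3);
	return 0;
}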