Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "I'm hoping this is the very last batch of networking fixes for 3.13,
  here goes nothing:

   1) Fix crashes in VLAN's header_ops passthru.

   2) Bridge multicast code needs to use BH spinlocks to prevent
      deadlocks with timers.  From Curt Brune.

   3) ipv6 tunnels lack proper synchronization when updating percpu
      statistics.  From Li RongQing.

   4) Fixes to bnx2x driver from Yaniv Rosner, Dmitry Kravkov and
      Michal Kalderon.

   5) Avoid undefined operator evaluation order in llc code, from
      Daniel Borkmann.

   6) Error paths in various GSO offload paths do not unwind properly,
      in particular they must undo any modifications they have made to
      the SKB.  From Wei-Chun Chao.

   7) Fix RX refill races during restore in virtio-net, from Jason
      Wang.

   8) Fix SKB use after free in LLC code, from Daniel Borkmann.

   9) Missing unlock and OOPS in netpoll code when VLAN tag handling
      fails.

  10) Fix vxlan device attachment wrt ipv6, from Fan Du.

  11) Don't allow creating infiniband links to non-infiniband devices,
      from Hangbin Liu.

  12) Revert FEC phy reset active low change, it breaks things.  From
      Fabio Estevam.

  13) Fix header pointer handling in 6lowpan header building code,
      from Daniel Borkmann.

  14) Fix RSS handling in be2net driver, from Vasundhara Volam.

  15) Fix modem port indexing in HSO driver, from Dan Williams"

* http://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  bridge: use spin_lock_bh() in br_multicast_set_hash_max
  ipv6: don't install anycast address for /128 addresses on routers
  hso: fix handling of modem port SERIAL_STATE notifications
  isdn: Drop big endian cpp checks from telespci and hfc_pci drivers
  be2net: fix max_evt_qs calculation for BE3 in SR-IOV config
  be2net: increase the timeout value for loopback-test FW cmd
  be2net: disable RSS when number of RXQs is reduced to 1 via set-channels
  xen-netback: Include header for vmalloc
  net: 6lowpan: fix lowpan_header_create non-compression memcpy call
  fec: Revert "fec: Do not assume that PHY reset is active low"
  bnx2x: fix VLAN configuration for VFs.
  bnx2x: fix AFEX memory overflow
  bnx2x: Clean before update RSS arrives
  bnx2x: Correct number of MSI-X vectors for VFs
  bnx2x: limit number of interrupt vectors for 57711
  qlcnic: Fix bug in Tx completion path
  infiniband: make sure the src net is infiniband when create new link
  {vxlan, inet6} Mark vxlan_dev flags with VXLAN_F_IPV6 properly
  cxgb4: allow large buffer size to have page size
  netpoll: Fix missing TXQ unlock and OOPS.
  ...
This commit is contained in: commit a707271a81
@@ -31,6 +31,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/if_arp.h>	/* For ARPHRD_xxx */
 #include <linux/module.h>
 #include <net/rtnetlink.h>
 #include "ipoib.h"

@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
         return -EINVAL;
 
     pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-    if (!pdev)
+    if (!pdev || pdev->type != ARPHRD_INFINIBAND)
         return -ENODEV;
 
     ppriv = netdev_priv(pdev);
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
     int i;
     struct pci_dev *tmp_hfcpci = NULL;
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
     strcpy(tmp, hfcpci_revision);
     printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
 

@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
     struct IsdnCardState *cs = card->cs;
     char tmp[64];
 
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
     strcpy(tmp, telespci_revision);
     printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
     if (cs->typ != ISDN_CTYPE_TELESPCI)
@@ -1250,7 +1250,10 @@ struct bnx2x_slowpath {
      * Therefore, if they would have been defined in the same union,
      * data can get corrupted.
      */
-    struct afex_vif_list_ramrod_data func_afex_rdata;
+    union {
+        struct afex_vif_list_ramrod_data viflist_data;
+        struct function_update_data func_update;
+    } func_afex_rdata;
 
     /* used by dmae command executer */
     struct dmae_command dmae[MAX_DMAE_C];

@@ -2499,4 +2502,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 #define MCPR_SCRATCH_BASE(bp) \
     (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
 #endif /* bnx2x.h */
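The AFEX hunk above replaces a single-struct buffer with a union of the two ramrod layouts, so the slot is always as large as the biggest message the firmware may write into it. A minimal standalone sketch of that sizing property, with invented stand-in structs rather than the real driver layouts:

#include <stdio.h>

struct small_rdata { unsigned int words[4]; };   /* hypothetical */
struct large_rdata { unsigned int words[16]; };  /* hypothetical */

union rdata_slot {
    struct small_rdata viflist_data;
    struct large_rdata func_update;
};

int main(void)
{
    /* Declaring the slot as only the small struct leaves the bytes that
     * follow it exposed if the large layout is written there; the union
     * always reserves sizeof(largest member).
     */
    printf("small=%zu large=%zu union=%zu\n",
           sizeof(struct small_rdata), sizeof(struct large_rdata),
           sizeof(union rdata_slot));
    return 0;
}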
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
         bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
     } else {
+        /* Enable Auto-Detect to support 1G over CL37 as well */
+        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                         MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+        /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+         * parallel-detect loop when CL73 and CL37 are enabled.
+         */
+        CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+                          MDIO_AER_BLOCK_AER_REG, 0);
+        bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+                         MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+        bnx2x_set_aer_mmd(params, phy);
+
         bnx2x_disable_kr2(params, vars, phy);
     }
 

@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
                 *edc_mode = EDC_MODE_ACTIVE_DAC;
             else
                 check_limiting_mode = 1;
-        } else if (copper_module_type &
-            SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
-            DP(NETIF_MSG_LINK,
-               "Passive Copper cable detected\n");
-            *edc_mode =
-                  EDC_MODE_PASSIVE_DAC;
-        } else {
-            DP(NETIF_MSG_LINK,
-               "Unknown copper-cable-type 0x%x !!!\n",
-               copper_module_type);
-            return -EINVAL;
+        } else {
+            *edc_mode = EDC_MODE_PASSIVE_DAC;
+            /* Even in case PASSIVE_DAC indication is not set,
+             * treat it as a passive DAC cable, since some cables
+             * don't have this indication.
+             */
+            if (copper_module_type &
+                SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+                DP(NETIF_MSG_LINK,
+                   "Passive Copper cable detected\n");
+            } else {
+                DP(NETIF_MSG_LINK,
+                   "Unknown copper-cable-type\n");
+            }
         }
         break;
     }

@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
             (1<<11));
 
     if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-            (phy->speed_cap_mask &
-            PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
-            (phy->req_line_speed == SPEED_1000)) {
+         (phy->speed_cap_mask &
+          PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+        (phy->req_line_speed == SPEED_1000)) {
         an_1000_val |= (1<<8);
         autoneg_val |= (1<<9 | 1<<12);
         if (phy->req_duplex == DUPLEX_FULL)

@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
             0x09,
             &an_1000_val);
 
-    /* Set 100 speed advertisement */
-    if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-        (phy->speed_cap_mask &
-        (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-        PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
-        an_10_100_val |= (1<<7);
-        /* Enable autoneg and restart autoneg for legacy speeds */
-        autoneg_val |= (1<<9 | 1<<12);
-
-        if (phy->req_duplex == DUPLEX_FULL)
-            an_10_100_val |= (1<<8);
-        DP(NETIF_MSG_LINK, "Advertising 100M\n");
-    }
-
-    /* Set 10 speed advertisement */
-    if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
-        (phy->speed_cap_mask &
-        (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
-        PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
-        an_10_100_val |= (1<<5);
-        autoneg_val |= (1<<9 | 1<<12);
-        if (phy->req_duplex == DUPLEX_FULL)
-            an_10_100_val |= (1<<6);
-        DP(NETIF_MSG_LINK, "Advertising 10M\n");
+    /* Advertise 10/100 link speed */
+    if (phy->req_line_speed == SPEED_AUTO_NEG) {
+        if (phy->speed_cap_mask &
+            PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+            an_10_100_val |= (1<<5);
+            autoneg_val |= (1<<9 | 1<<12);
+            DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+        }
+        if (phy->speed_cap_mask &
+            PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
+            an_10_100_val |= (1<<6);
+            autoneg_val |= (1<<9 | 1<<12);
+            DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+        }
+        if (phy->speed_cap_mask &
+            PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+            an_10_100_val |= (1<<7);
+            autoneg_val |= (1<<9 | 1<<12);
+            DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+        }
+        if (phy->speed_cap_mask &
+            PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+            an_10_100_val |= (1<<8);
+            autoneg_val |= (1<<9 | 1<<12);
+            DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+        }
     }
 
     /* Only 10/100 are allowed to work in FORCE mode */

@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
     DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
        old_status, status);
 
+    /* Do not touch the link in case physical link down */
+    if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+        return 1;
+
     /* a. Update shmem->link_status accordingly
      * b. Update link_vars->link_up
      */

@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
      */
     not_kr2_device = (((base_page & 0x8000) == 0) ||
                       (((base_page & 0x8000) &&
-                        ((next_page & 0xe0) == 0x2))));
+                        ((next_page & 0xe0) == 0x20))));
 
     /* In case KR2 is already disabled, check if we need to re-enable it */
     if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
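The KR2 hunk corrects a comparison that could never be true: masking with 0xe0 keeps only bits 7:5, so the result can be 0x20, 0x40, ... 0xe0 -- never 0x2. A small standalone demonstration of the class of bug:

#include <stdio.h>

int main(void)
{
    for (unsigned int next_page = 0; next_page <= 0xff; next_page++) {
        if ((next_page & 0xe0) == 0x2)
            printf("0x%02x matched 0x2 (never printed)\n", next_page);
    }
    /* A masked value can only equal constants inside the mask. */
    printf("0x2f & 0xe0 = 0x%02x, so compare against 0x20, not 0x2\n",
           0x2f & 0xe0);
    return 0;
}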
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
         }
     }
 
-    /* adjust igu_sb_cnt to MF for E1x */
-    if (CHIP_IS_E1x(bp) && IS_MF(bp))
-        bp->igu_sb_cnt /= E1HVN_MAX;
+    /* adjust igu_sb_cnt to MF for E1H */
+    if (CHIP_IS_E1H(bp) && IS_MF(bp))
+        bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
 
     /* port info */
     bnx2x_get_port_hwinfo(bp);
@@ -7179,6 +7179,7 @@ The other bits are reserved and should be zero */
 #define MDIO_WC_REG_RX1_PCI_CTRL		0x80ca
 #define MDIO_WC_REG_RX2_PCI_CTRL		0x80da
 #define MDIO_WC_REG_RX3_PCI_CTRL		0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI	0x80fa
 #define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G	0x8104
 #define MDIO_WC_REG_XGXS_STATUS3		0x8129
 #define MDIO_WC_REG_PAR_DET_10G_STATUS		0x8130
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
     struct bnx2x_vlan_mac_ramrod_params p;
     struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
     struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+    unsigned long flags;
     int read_lock;
     int rc = 0;
 

@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
     spin_lock_bh(&exeq->lock);
 
     list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
-        if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-            *vlan_mac_flags) {
+        flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+        if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+            BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
             rc = exeq->remove(bp, exeq->owner, exeq_pos);
             if (rc) {
                 BNX2X_ERR("Failed to remove command\n");

@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
         return read_lock;
 
     list_for_each_entry(pos, &o->head, link) {
-        if (pos->vlan_mac_flags == *vlan_mac_flags) {
+        flags = pos->vlan_mac_flags;
+        if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+            BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
             p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
             memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
             rc = bnx2x_config_vlan_mac(bp, &p);

@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
     struct bnx2x_raw_obj *r = &o->raw;
 
     /* Do nothing if only driver cleanup was requested */
-    if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+    if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+        DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+           p->ramrod_flags);
         return 0;
+    }
 
     r->set_pending(r);
 

@@ -266,6 +266,13 @@ enum {
     BNX2X_DONT_CONSUME_CAM_CREDIT,
     BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
 };
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK	(1 << BNX2X_UC_LIST_MAC | \
+				 1 << BNX2X_ETH_MAC | \
+				 1 << BNX2X_ISCSI_ETH_MAC | \
+				 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+	((flags) & BNX2X_VLAN_MAC_CMP_MASK)
 
 struct bnx2x_vlan_mac_ramrod_params {
     /* Object to run the command from */
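The CMP_FLAGS hunks make both sides of the filter comparison pass through a mask first, so bookkeeping bits no longer prevent a legitimate match. A standalone sketch of the idea, with invented bit positions:

#include <stdio.h>

#define UC_LIST_MAC   0   /* hypothetical filter-class bits */
#define ETH_MAC       1
#define DONT_CONSUME  7   /* bookkeeping bit, not interesting for matching */

#define CMP_MASK     (1 << UC_LIST_MAC | 1 << ETH_MAC)
#define CMP_FLAGS(f) ((f) & CMP_MASK)

int main(void)
{
    unsigned long stored = 1 << ETH_MAC | 1 << DONT_CONSUME;
    unsigned long wanted = 1 << ETH_MAC;

    printf("raw equal:    %d\n", stored == wanted);   /* 0: match missed */
    printf("masked equal: %d\n",
           CMP_FLAGS(stored) == CMP_FLAGS(wanted));   /* 1: match found */
    return 0;
}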
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
         /* next state */
         vfop->state = BNX2X_VFOP_RXMODE_DONE;
 
+        /* record the accept flags in vfdb so hypervisor can modify them
+         * if necessary
+         */
+        bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+            ramrod->rx_accept_flags;
         vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
         bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 op_err:

@@ -1224,39 +1229,43 @@ op_pending:
     return;
 }
 
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+                                  struct bnx2x_rx_mode_ramrod_params *ramrod,
+                                  struct bnx2x_virtf *vf,
+                                  unsigned long accept_flags)
+{
+    struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+    memset(ramrod, 0, sizeof(*ramrod));
+    ramrod->cid = vfq->cid;
+    ramrod->cl_id = vfq_cl_id(vf, vfq);
+    ramrod->rx_mode_obj = &bp->rx_mode_obj;
+    ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+    ramrod->rx_accept_flags = accept_flags;
+    ramrod->tx_accept_flags = accept_flags;
+    ramrod->pstate = &vf->filter_state;
+    ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+    set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+    set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+    set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+    ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+    ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vfop_cmd *cmd,
                           int qid, unsigned long accept_flags)
 {
-    struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
     struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
     if (vfop) {
         struct bnx2x_rx_mode_ramrod_params *ramrod =
             &vf->op_params.rx_mode;
 
-        memset(ramrod, 0, sizeof(*ramrod));
-
-        /* Prepare ramrod parameters */
-        ramrod->cid = vfq->cid;
-        ramrod->cl_id = vfq_cl_id(vf, vfq);
-        ramrod->rx_mode_obj = &bp->rx_mode_obj;
-        ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
-        ramrod->rx_accept_flags = accept_flags;
-        ramrod->tx_accept_flags = accept_flags;
-        ramrod->pstate = &vf->filter_state;
-        ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
-        set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
-        set_bit(RAMROD_RX, &ramrod->ramrod_flags);
-        set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
-        ramrod->rdata =
-            bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
-        ramrod->rdata_mapping =
-            bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+        bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
 
         bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
                          bnx2x_vfop_rxmode, cmd->done);

@@ -3202,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
         bnx2x_iov_static_resc(bp, vf);
     }
 
-    /* prepare msix vectors in VF configuration space */
+    /* prepare msix vectors in VF configuration space - the value in the
+     * PCI configuration space should be the index of the last entry,
+     * namely one less than the actual size of the table
+     */
     for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
         REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
-               num_vf_queues);
+               num_vf_queues - 1);
         DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
-           vf_idx, num_vf_queues);
+           vf_idx, num_vf_queues - 1);
     }
     bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 
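The MSI-X hunk encodes the table size the way the comment it adds describes: the register holds the index of the last entry, i.e. N-1, so a value of 0 means one vector. A tiny standalone sketch of that encoding:

#include <stdio.h>

static unsigned int encode_msix_table_size(unsigned int vectors)
{
    return vectors - 1;   /* register holds index of last entry */
}

static unsigned int decode_msix_table_size(unsigned int reg)
{
    return reg + 1;       /* actual number of table entries */
}

int main(void)
{
    unsigned int num_vf_queues = 4;   /* example value */
    unsigned int reg = encode_msix_table_size(num_vf_queues);

    printf("queues=%u -> register=%u -> decoded=%u\n",
           num_vf_queues, reg, decode_msix_table_size(reg));
    return 0;
}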
@@ -3436,10 +3448,18 @@ out:
 
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
-    struct bnx2x *bp = netdev_priv(dev);
-    int rc, q_logical_state;
-    struct bnx2x_virtf *vf = NULL;
-    struct pf_vf_bulletin_content *bulletin = NULL;
+    struct bnx2x_queue_state_params q_params = {NULL};
+    struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+    struct bnx2x_queue_update_params *update_params;
+    struct pf_vf_bulletin_content *bulletin = NULL;
+    struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+    struct bnx2x *bp = netdev_priv(dev);
+    struct bnx2x_vlan_mac_obj *vlan_obj;
+    unsigned long vlan_mac_flags = 0;
+    unsigned long ramrod_flags = 0;
+    struct bnx2x_virtf *vf = NULL;
+    unsigned long accept_flags;
+    int rc;
 
     /* sanity and init */
     rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);

@@ -3457,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
     /* update PF's copy of the VF's bulletin. No point in posting the vlan
      * to the VF since it doesn't have anything to do with it. But it useful
      * to store it here in case the VF is not up yet and we can only
-     * configure the vlan later when it does.
+     * configure the vlan later when it does. Treat vlan id 0 as remove the
+     * Host tag.
      */
-    bulletin->valid_bitmap |= 1 << VLAN_VALID;
+    if (vlan > 0)
+        bulletin->valid_bitmap |= 1 << VLAN_VALID;
+    else
+        bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
     bulletin->vlan = vlan;
 
     /* is vf initialized and queue set up? */
-    q_logical_state =
-        bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
-    if (vf->state == VF_ENABLED &&
-        q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-        /* configure the vlan in device on this vf's queue */
-        unsigned long ramrod_flags = 0;
-        unsigned long vlan_mac_flags = 0;
-        struct bnx2x_vlan_mac_obj *vlan_obj =
-            &bnx2x_leading_vfq(vf, vlan_obj);
-        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-        struct bnx2x_queue_state_params q_params = {NULL};
-        struct bnx2x_queue_update_params *update_params;
+    if (vf->state != VF_ENABLED ||
+        bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+        BNX2X_Q_LOGICAL_STATE_ACTIVE)
+        return rc;
 
-        rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-        if (rc)
-            return rc;
-        memset(&ramrod_param, 0, sizeof(ramrod_param));
+    /* configure the vlan in device on this vf's queue */
+    vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+    rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+    if (rc)
+        return rc;
 
-        /* must lock vfpf channel to protect against vf flows */
-        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+    /* must lock vfpf channel to protect against vf flows */
+    bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
-        /* remove existing vlans */
-        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
-                                  &ramrod_flags);
-        if (rc) {
-            BNX2X_ERR("failed to delete vlans\n");
-            rc = -EINVAL;
-            goto out;
-        }
+    /* remove existing vlans */
+    __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+    rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+                              &ramrod_flags);
+    if (rc) {
+        BNX2X_ERR("failed to delete vlans\n");
+        rc = -EINVAL;
+        goto out;
+    }
+
+    /* need to remove/add the VF's accept_any_vlan bit */
+    accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+    if (vlan)
+        clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+    else
+        set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+    bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+                          accept_flags);
+    bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+    bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+    /* configure the new vlan to device */
+    memset(&ramrod_param, 0, sizeof(ramrod_param));
+    __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+    ramrod_param.vlan_mac_obj = vlan_obj;
+    ramrod_param.ramrod_flags = ramrod_flags;
+    set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+            &ramrod_param.user_req.vlan_mac_flags);
+    ramrod_param.user_req.u.vlan.vlan = vlan;
+    ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+    rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+    if (rc) {
+        BNX2X_ERR("failed to configure vlan\n");
+        rc = -EINVAL;
+        goto out;
+    }
 
-        /* send queue update ramrod to configure default vlan and silent
-         * vlan removal
-         */
-        __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-        q_params.cmd = BNX2X_Q_CMD_UPDATE;
-        q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-        update_params = &q_params.params.update;
-        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-                  &update_params->update_flags);
-        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-                  &update_params->update_flags);
+    /* send queue update ramrod to configure default vlan and silent
+     * vlan removal
+     */
+    __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+    q_params.cmd = BNX2X_Q_CMD_UPDATE;
+    q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+    update_params = &q_params.params.update;
+    __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+              &update_params->update_flags);
+    __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+              &update_params->update_flags);
+    if (vlan == 0) {
+        /* if vlan is 0 then we want to leave the VF traffic
+         * untagged, and leave the incoming traffic untouched
+         * (i.e. do not remove any vlan tags).
+         */
+        __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                    &update_params->update_flags);
+        __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                    &update_params->update_flags);
+    } else {
+        /* configure default vlan to vf queue and set silent
+         * vlan removal (the vf remains unaware of this vlan).
+         */
+        __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                  &update_params->update_flags);
+        __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                  &update_params->update_flags);
+        update_params->def_vlan = vlan;
+        update_params->silent_removal_value =
+            vlan & VLAN_VID_MASK;
+        update_params->silent_removal_mask = VLAN_VID_MASK;
+    }
 
-        if (vlan == 0) {
-            /* if vlan is 0 then we want to leave the VF traffic
-             * untagged, and leave the incoming traffic untouched
-             * (i.e. do not remove any vlan tags).
-             */
-            __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                        &update_params->update_flags);
-            __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                        &update_params->update_flags);
-        } else {
-            /* configure the new vlan to device */
-            __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-            ramrod_param.vlan_mac_obj = vlan_obj;
-            ramrod_param.ramrod_flags = ramrod_flags;
-            ramrod_param.user_req.u.vlan.vlan = vlan;
-            ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-            rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-            if (rc) {
-                BNX2X_ERR("failed to configure vlan\n");
-                rc = -EINVAL;
-                goto out;
-            }
-
-            /* configure default vlan to vf queue and set silent
-             * vlan removal (the vf remains unaware of this vlan).
-             */
-            update_params = &q_params.params.update;
-            __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                      &update_params->update_flags);
-            __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                      &update_params->update_flags);
-            update_params->def_vlan = vlan;
-        }
+    /* Update the Queue state */
+    rc = bnx2x_queue_state_change(bp, &q_params);
+    if (rc) {
+        BNX2X_ERR("Failed to configure default VLAN\n");
+        goto out;
+    }
 
-        /* Update the Queue state */
-        rc = bnx2x_queue_state_change(bp, &q_params);
-        if (rc) {
-            BNX2X_ERR("Failed to configure default VLAN\n");
-            goto out;
-        }
 
-        /* clear the flag indicating that this VF needs its vlan
-         * (will only be set if the HV configured the Vlan before vf was
-         * up and we were called because the VF came up later
-         */
-out:
-        vf->cfg_flags &= ~VF_CFG_VLAN;
-        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
-    }
+    /* clear the flag indicating that this VF needs its vlan
+     * (will only be set if the HV configured the Vlan before vf was
+     * up and we were called because the VF came up later
+     */
+out:
+    vf->cfg_flags &= ~VF_CFG_VLAN;
+    bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
     return rc;
 }
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
     /* VLANs object */
     struct bnx2x_vlan_mac_obj	vlan_obj;
     atomic_t vlan_count;		/* 0 means vlan-0 is set ~ untagged */
+    unsigned long accept_flags;	/* last accept flags configured */
 
     /* Queue Slow-path State object */
     struct bnx2x_queue_sp_obj	sp_obj;
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
     if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
         unsigned long accept = 0;
+        struct pf_vf_bulletin_content *bulletin =
+            BP_VF_BULLETIN(bp, vf->index);
 
         /* covert VF-PF if mask to bnx2x accept flags */
         if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)

@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
             __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 
         /* A packet arriving the vf's mac should be accepted
-         * with any vlan
+         * with any vlan, unless a vlan has already been
+         * configured.
          */
-        __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+        if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+            __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
         /* set rx-mode */
         rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,

@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
             goto response;
         }
     }
+    /* if vlan was set by hypervisor we don't allow guest to config vlan */
+    if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+        int i;
+
+        /* search for vlan filters */
+        for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+            if (filters->filters[i].flags &
+                VFPF_Q_FILTER_VLAN_TAG_VALID) {
+                BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+                          vf->abs_vfid);
+                vf->op_rc = -EPERM;
+                goto response;
+            }
+        }
+    }
 
     /* verify vf_qid */
     if (filters->vf_qid > vf_rxq_count(vf))

@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
     vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
 
     /* flags handled individually for backward/forward compatability */
+    vf_op_params->rss_flags = 0;
+    vf_op_params->ramrod_flags = 0;
+
     if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
         __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
     if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
     #undef READ_FL_BUF
 
     if (fl_small_pg != PAGE_SIZE ||
-        (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+        (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
                               (fl_large_pg & (fl_large_pg-1)) != 0))) {
         dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
             fl_small_pg, fl_large_pg);
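The cxgb4 hunk relaxes `<=` to `<` so the large free-list buffer may now equal the page size, while keeping the usual power-of-two test `(x & (x - 1)) == 0`. A standalone sketch of the validation, with illustrative sizes:

#include <stdio.h>

static int fl_sizes_ok(unsigned int small_pg, unsigned int large_pg)
{
    if (large_pg == 0)
        return 1;                           /* large buffer unused */
    return large_pg >= small_pg &&          /* equal is OK after the fix */
           (large_pg & (large_pg - 1)) == 0;/* must be a power of two */
}

int main(void)
{
    printf("%d\n", fl_sizes_ok(4096, 4096));  /* 1: allowed after the fix */
    printf("%d\n", fl_sizes_ok(4096, 65536)); /* 1 */
    printf("%d\n", fl_sizes_ok(4096, 24576)); /* 0: not a power of two */
    return 0;
}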
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define BE3_MAX_RSS_QS		16
 #define BE3_MAX_TX_QS		16
 #define BE3_MAX_EVT_QS		16
+#define BE3_SRIOV_MAX_EVT_QS	8
 
 #define MAX_RX_QS		32
 #define MAX_EVT_QS		32

@@ -480,7 +481,7 @@ struct be_adapter {
     struct list_head entry;
 
     u32 flash_status;
-    struct completion flash_compl;
+    struct completion et_cmd_compl;
 
     struct be_resources res;	/* resources available for the func */
     u16 num_vfs;		/* Number of VFs provisioned by PF */

@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
         subsystem = resp_hdr->subsystem;
     }
 
+    if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+        subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+        complete(&adapter->et_cmd_compl);
+        return 0;
+    }
+
     if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
         (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
         (subsystem == CMD_SUBSYSTEM_COMMON)) {
         adapter->flash_status = compl_status;
-        complete(&adapter->flash_compl);
+        complete(&adapter->et_cmd_compl);
     }
 
     if (compl_status == MCC_STATUS_SUCCESS) {

@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
             0x3ea83c02, 0x4a110304};
     int status;
 
+    if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+        return 0;
+
     if (mutex_lock_interruptible(&adapter->mbox_lock))
         return -1;
 

@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
     be_mcc_notify(adapter);
     spin_unlock_bh(&adapter->mcc_lock);
 
-    if (!wait_for_completion_timeout(&adapter->flash_compl,
+    if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                      msecs_to_jiffies(60000)))
         status = -1;
     else

@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
     be_mcc_notify(adapter);
     spin_unlock_bh(&adapter->mcc_lock);
 
-    if (!wait_for_completion_timeout(&adapter->flash_compl,
-                                     msecs_to_jiffies(40000)))
+    if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+                                     msecs_to_jiffies(40000)))
         status = -1;
     else
         status = adapter->flash_status;

@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 {
     struct be_mcc_wrb *wrb;
     struct be_cmd_req_loopback_test *req;
+    struct be_cmd_resp_loopback_test *resp;
     int status;
 
     spin_lock_bh(&adapter->mcc_lock);

@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 
     be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
             OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
-    req->hdr.timeout = cpu_to_le32(4);
 
+    req->hdr.timeout = cpu_to_le32(15);
     req->pattern = cpu_to_le64(pattern);
     req->src_port = cpu_to_le32(port_num);
     req->dest_port = cpu_to_le32(port_num);

@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
     req->num_pkts = cpu_to_le32(num_pkts);
     req->loopback_type = cpu_to_le32(loopback_type);
 
-    status = be_mcc_notify_wait(adapter);
-    if (!status) {
-        struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
-        status = le32_to_cpu(resp->status);
-    }
+    be_mcc_notify(adapter);
 
     spin_unlock_bh(&adapter->mcc_lock);
+
+    wait_for_completion(&adapter->et_cmd_compl);
+    resp = embedded_payload(wrb);
+    status = le32_to_cpu(resp->status);
+
+    return status;
 err:
     spin_unlock_bh(&adapter->mcc_lock);
     return status;
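The be2net hunks switch the loopback test from a synchronous notify-and-wait to firing the command and blocking on a completion that the MCC reply path signals. A standalone pthread analogue of that handshake; the names and the one-second "firmware" delay are illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int done;                /* the "completion" state */

static void *reply_path(void *arg)
{
    sleep(1);                   /* pretend the firmware is working */
    pthread_mutex_lock(&lock);
    done = 1;                   /* complete(&adapter->et_cmd_compl); */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, reply_path, NULL); /* "send command" */

    pthread_mutex_lock(&lock);                  /* wait_for_completion() */
    while (!done)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    printf("loopback-test reply received\n");
    pthread_join(t, NULL);
    return 0;
}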
@@ -2744,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
         if (!BEx_chip(adapter))
             adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
                                   RSS_ENABLE_UDP_IPV6;
-
-        rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
-                               128);
-        if (rc) {
-            adapter->rss_flags = 0;
-            return rc;
-        }
+    } else {
+        /* Disable RSS, if only default RX Q is created */
+        adapter->rss_flags = RSS_ENABLE_NONE;
     }
 
+    rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+                           128);
+    if (rc) {
+        adapter->rss_flags = RSS_ENABLE_NONE;
+        return rc;
+    }
+
     /* First time posting */

@@ -3124,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
 {
     struct pci_dev *pdev = adapter->pdev;
     bool use_sriov = false;
+    int max_vfs;
+
+    max_vfs = pci_sriov_get_totalvfs(pdev);
 
     if (BE3_chip(adapter) && sriov_want(adapter)) {
-        int max_vfs;
-
-        max_vfs = pci_sriov_get_totalvfs(pdev);
         res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
         use_sriov = res->max_vfs;
     }

@@ -3159,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
                BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
     res->max_rx_qs = res->max_rss_qs + 1;
 
-    res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+    if (be_physfn(adapter))
+        res->max_evt_qs = (max_vfs > 0) ?
+                          BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+    else
+        res->max_evt_qs = 1;
 
     res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
     if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))

@@ -4205,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
 
     spin_lock_init(&adapter->mcc_lock);
     spin_lock_init(&adapter->mcc_cq_lock);
-    init_completion(&adapter->flash_compl);
+    init_completion(&adapter->et_cmd_compl);
 
     pci_save_state(adapter->pdev);
     return 0;
@@ -2049,8 +2049,6 @@ static void fec_reset_phy(struct platform_device *pdev)
     int err, phy_reset;
     int msec = 1;
     struct device_node *np = pdev->dev.of_node;
-    enum of_gpio_flags flags;
-    bool port;
 
     if (!np)
         return;

@@ -2060,22 +2058,18 @@ static void fec_reset_phy(struct platform_device *pdev)
     if (msec > 1000)
         msec = 1;
 
-    phy_reset = of_get_named_gpio_flags(np, "phy-reset-gpios", 0, &flags);
+    phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
     if (!gpio_is_valid(phy_reset))
         return;
 
-    if (flags & OF_GPIO_ACTIVE_LOW)
-        port = GPIOF_OUT_INIT_LOW;
-    else
-        port = GPIOF_OUT_INIT_HIGH;
-
-    err = devm_gpio_request_one(&pdev->dev, phy_reset, port, "phy-reset");
+    err = devm_gpio_request_one(&pdev->dev, phy_reset,
+                                GPIOF_OUT_INIT_LOW, "phy-reset");
     if (err) {
         dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
         return;
     }
     msleep(msec);
-    gpio_set_value(phy_reset, !port);
+    gpio_set_value(phy_reset, 1);
 }
 #else /* CONFIG_OF */
 static void fec_reset_phy(struct platform_device *pdev)
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
     struct qlcnic_mailbox *mailbox;
     u8 extend_lb_time;
     u8 phys_port_id[ETH_ALEN];
+    u8 lb_mode;
 };
 
 struct qlcnic_adapter_stats {

@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
     dma_addr_t phys_addr;
     dma_addr_t hw_cons_phys_addr;
     struct netdev_queue *txq;
+    /* Lock to protect Tx descriptors cleanup */
+    spinlock_t tx_clean_lock;
 } ____cacheline_internodealigned_in_smp;
 
 /*

@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
 
 #define QLCNIC_ILB_MODE		0x1
 #define QLCNIC_ELB_MODE		0x2
+#define QLCNIC_LB_MODE_MASK	0x3
 
 #define QLCNIC_LINKEVENT	0x1
 #define QLCNIC_LB_RESPONSE	0x2

@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
     struct qlcnic_filter_hash rx_fhash;
     struct list_head vf_mc_list;
 
-    spinlock_t tx_clean_lock;
     spinlock_t mac_learn_lock;
     /* spinlock for catching rcv filters for eswitch traffic */
     spinlock_t rx_mac_learn_lock;

@@ -1684,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
         }
     } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
 
-    /* Make sure carrier is off and queue is stopped during loopback */
-    if (netif_running(netdev)) {
-        netif_carrier_off(netdev);
-        netif_tx_stop_all_queues(netdev);
-    }
-
     ret = qlcnic_do_lb_test(adapter, mode);
 
     qlcnic_83xx_clear_lb_mode(adapter, mode);

@@ -2121,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
     ahw->link_autoneg = MSB(MSW(data[3]));
     ahw->module_type = MSB(LSW(data[3]));
     ahw->has_link_events = 1;
+    ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
     qlcnic_advert_link_change(adapter, link_status);
 }
 

@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
     struct qlcnic_skb_frag *buffrag;
     int i, j;
 
+    spin_lock(&tx_ring->tx_clean_lock);
+
     cmd_buf = tx_ring->cmd_buf_arr;
     for (i = 0; i < tx_ring->num_desc; i++) {
         buffrag = cmd_buf->frag_array;

@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
         }
         cmd_buf++;
     }
+
+    spin_unlock(&tx_ring->tx_clean_lock);
 }
 
 void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)

@@ -689,6 +689,10 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
         adapter->ahw->linkup = 0;
         netif_carrier_off(netdev);
     } else if (!adapter->ahw->linkup && linkup) {
+        /* Do not advertise Link up if the port is in loopback mode */
+        if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+            return;
+
         netdev_info(netdev, "NIC Link is up\n");
         adapter->ahw->linkup = 1;
         netif_carrier_on(netdev);

@@ -778,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
     struct net_device *netdev = adapter->netdev;
     struct qlcnic_skb_frag *frag;
 
-    if (!spin_trylock(&adapter->tx_clean_lock))
+    if (!spin_trylock(&tx_ring->tx_clean_lock))
         return 1;
 
     sw_consumer = tx_ring->sw_consumer;

@@ -807,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
             break;
     }
 
+    tx_ring->sw_consumer = sw_consumer;
+
     if (count && netif_running(netdev)) {
-        tx_ring->sw_consumer = sw_consumer;
         smp_mb();
         if (netif_tx_queue_stopped(tx_ring->txq) &&
             netif_carrier_ok(netdev)) {

@@ -834,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
      */
     hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
     done = (sw_consumer == hw_consumer);
-    spin_unlock(&adapter->tx_clean_lock);
+
+    spin_unlock(&tx_ring->tx_clean_lock);
 
     return done;
 }

@@ -1756,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
     if (qlcnic_sriov_vf_check(adapter))
         qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
     smp_mb();
-    spin_lock(&adapter->tx_clean_lock);
     netif_carrier_off(netdev);
     adapter->ahw->linkup = 0;
     netif_tx_disable(netdev);

@@ -1777,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
 
     for (ring = 0; ring < adapter->drv_tx_rings; ring++)
         qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
-    spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */

@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
         }
         memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
         tx_ring->cmd_buf_arr = cmd_buf_arr;
+        spin_lock_init(&tx_ring->tx_clean_lock);
     }
 
     if (qlcnic_83xx_check(adapter) ||

@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     rwlock_init(&adapter->ahw->crb_lock);
     mutex_init(&adapter->ahw->mem_lock);
 
-    spin_lock_init(&adapter->tx_clean_lock);
     INIT_LIST_HEAD(&adapter->mac_list);
 
     qlcnic_register_dcb(adapter);
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
     num_vfs = sriov->num_vfs;
     max = num_vfs + 1;
     info->bit_offsets = 0xffff;
-    info->max_tx_ques = res->num_tx_queues / max;
     info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
     num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
 

@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
         info->max_tx_mac_filters = temp;
         info->min_tx_bw = 0;
         info->max_tx_bw = MAX_BW;
+        info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
     } else {
         id = qlcnic_sriov_func_to_index(adapter, func);
         if (id < 0)

@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
         info->max_tx_bw = vp->max_tx_bw;
         info->max_rx_ucast_mac_filters = num_vf_macs;
         info->max_tx_mac_filters = num_vf_macs;
+        info->max_tx_ques = QLCNIC_SINGLE_RING;
     }
 
     info->max_rx_ip_addr = res->num_destip / max;
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
 #define BM_REQUEST_TYPE (0xa1)
 #define B_NOTIFICATION  (0x20)
 #define W_VALUE         (0x0)
-#define W_INDEX         (0x2)
 #define W_LENGTH        (0x2)
 
 #define B_OVERRUN       (0x1<<6)

@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
     struct uart_icount *icount;
     struct hso_serial_state_notification *serial_state_notification;
     struct usb_device *usb;
+    int if_num;
 
     /* Sanity checks */
     if (!serial)

@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
         handle_usb_error(status, __func__, serial->parent);
         return;
     }
+
+    /* tiocmget is only supported on HSO_PORT_MODEM */
     tiocmget = serial->tiocmget;
     if (!tiocmget)
         return;
+    BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
     usb = serial->parent->usb;
+    if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+    /* wIndex should be the USB interface number of the port to which the
+     * notification applies, which should always be the Modem port.
+     */
     serial_state_notification = &tiocmget->serial_state_notification;
     if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
         serial_state_notification->bNotification != B_NOTIFICATION ||
         le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
-        le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+        le16_to_cpu(serial_state_notification->wIndex) != if_num ||
         le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
         dev_warn(&usb->dev,
                  "hso received invalid serial state notification\n");
@@ -117,7 +117,6 @@ enum {
 struct mcs7830_data {
     u8 multi_filter[8];
     u8 config;
-    u8 link_counter;
 };
 
 static const char driver_name[] = "MOSCHIP usb-ethernet driver";

@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
 {
     u8 *buf = urb->transfer_buffer;
     bool link, link_changed;
-    struct mcs7830_data *data = mcs7830_get_data(dev);
 
     if (urb->actual_length < 16)
         return;
 
-    link = !(buf[1] & 0x20);
+    link = !(buf[1] == 0x20);
     link_changed = netif_carrier_ok(dev->net) != link;
     if (link_changed) {
-        data->link_counter++;
-        /*
-           track link state 20 times to guard against erroneous
-           link state changes reported sometimes by the chip
-         */
-        if (data->link_counter > 20) {
-            data->link_counter = 0;
-            usbnet_link_change(dev, link, 0);
-            netdev_dbg(dev->net, "Link Status is: %d\n", link);
-        }
-    } else
-        data->link_counter = 0;
+        usbnet_link_change(dev, link, 0);
+        netdev_dbg(dev->net, "Link Status is: %d\n", link);
+    }
 }
 
 static const struct driver_info moschip_info = {
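Note the predicate change in the mcs7830 hunk: `buf[1] & 0x20` is true whenever bit 5 is set regardless of the other bits, while `buf[1] == 0x20` matches only when bit 5 is the sole bit set -- genuinely different tests. A standalone comparison of the two, with made-up sample bytes:

#include <stdio.h>

int main(void)
{
    unsigned char samples[] = { 0x20, 0x24, 0x00 };

    for (int i = 0; i < 3; i++) {
        unsigned char b = samples[i];
        printf("buf[1]=0x%02x  !(b & 0x20) -> %d  !(b == 0x20) -> %d\n",
               b, !(b & 0x20), !(b == 0x20));
    }
    return 0;
}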
@@ -1797,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
     if (err)
         return err;
 
-    if (netif_running(vi->dev))
+    if (netif_running(vi->dev)) {
+        for (i = 0; i < vi->curr_queue_pairs; i++)
+            if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+                schedule_delayed_work(&vi->refill, 0);
+
         for (i = 0; i < vi->max_queue_pairs; i++)
             virtnet_napi_enable(&vi->rq[i]);
+    }
 
     netif_device_attach(vi->dev);
 
-    for (i = 0; i < vi->curr_queue_pairs; i++)
-        if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
-            schedule_delayed_work(&vi->refill, 0);
-
     mutex_lock(&vi->config_lock);
     vi->config_enable = true;
     mutex_unlock(&vi->config_lock);
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
         /* update header length based on lower device */
         dev->hard_header_len = lowerdev->hard_header_len +
                                (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
-    }
+    } else if (use_ipv6)
+        vxlan->flags |= VXLAN_F_IPV6;
 
     if (data[IFLA_VXLAN_TOS])
         vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
@@ -34,6 +34,7 @@
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
@@ -1912,6 +1912,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
     return dev->header_ops->parse(skb, haddr);
 }
 
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+    const struct net_device *dev = skb->dev;
+
+    if (!dev->header_ops || !dev->header_ops->rebuild)
+        return 0;
+    return dev->header_ops->rebuild(skb);
+}
+
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)

@@ -3008,6 +3017,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
     dev->gso_max_size = size;
 }
 
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+                                        int pulled_hlen, u16 mac_offset,
+                                        int mac_len)
+{
+    skb->protocol = protocol;
+    skb->encapsulation = 1;
+    skb_push(skb, pulled_hlen);
+    skb_reset_transport_header(skb);
+    skb->mac_header = mac_offset;
+    skb->network_header = skb->mac_header + mac_len;
+    skb->mac_len = mac_len;
+}
+
 static inline bool netif_is_macvlan(struct net_device *dev)
 {
     return dev->priv_flags & IFF_MACVLAN;
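skb_gso_error_unwind() exists so a tunnel GSO path that has already pulled the outer header can put the skb back the way it found it before reporting failure. A loose standalone sketch of that save-then-restore shape, with an invented parser type standing in for the skb:

#include <stdio.h>

struct parser {
    char buf[32];
    unsigned int hdr_off;   /* analogous to skb->mac_header */
    unsigned int data_off;  /* analogous to the pulled header length */
};

static int segment(struct parser *p, int inner_failed)
{
    unsigned int saved_hdr = p->hdr_off;    /* u16 mac_offset = ...; */
    unsigned int saved_data = p->data_off;

    p->data_off += 4;       /* "pull" the outer header */
    p->hdr_off = p->data_off;

    if (inner_failed) {
        p->hdr_off = saved_hdr;     /* the skb_gso_error_unwind() step */
        p->data_off = saved_data;
        return -1;
    }
    return 0;
}

int main(void)
{
    struct parser p = { "example", 0, 0 };

    segment(&p, 1);     /* force the failure path */
    printf("offsets restored: hdr=%u data=%u\n", p.hdr_off, p.data_off);
    return 0;
}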
@@ -142,7 +142,7 @@
 #define LLC_S_PF_IS_1(pdu)     ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
 
 #define PDU_SUPV_GET_Nr(pdu)   ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn)    (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn)    (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
 
 /* FRMR information field macros */
 
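The llc_pdu.h hunk removes a side effect from a macro: `++sn` mutates the caller's variable at each expansion and invites evaluation-order trouble in larger expressions, while `((sn) + 1)` is a pure expression. A standalone demonstration, with a stand-in modulo constant:

#include <stdio.h>

#define NBR_MODULO 0x80                             /* stand-in value */
#define NEXT_VR_OLD(sn) (++sn & ~NBR_MODULO)        /* mutates sn */
#define NEXT_VR_NEW(sn) (((sn) + 1) & ~NBR_MODULO)  /* pure expression */

int main(void)
{
    unsigned char a = 5, b = 5;

    printf("old: %d, sn afterwards: %d\n", NEXT_VR_OLD(a), a); /* a is now 6 */
    printf("new: %d, sn afterwards: %d\n", NEXT_VR_NEW(b), b); /* b still 5 */
    return 0;
}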
@@ -1046,9 +1046,6 @@ struct sctp_outq {
 
     /* Corked? */
     char cork;
-
-    /* Is this structure empty? */
-    char empty;
 };
 
 void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
     .parse	 = eth_header_parse,
 };
 
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+                                     unsigned short type,
+                                     const void *daddr, const void *saddr,
+                                     unsigned int len)
+{
+    struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+    struct net_device *real_dev = vlan->real_dev;
+
+    return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+    .create	 = vlan_passthru_hard_header,
+    .rebuild = dev_rebuild_header,
+    .parse	 = eth_header_parse,
+};
+
 static struct device_type vlan_type = {
     .name   = "vlan",
 };

@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
 
     dev->needed_headroom = real_dev->needed_headroom;
     if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-        dev->header_ops      = real_dev->header_ops;
+        dev->header_ops      = &vlan_passthru_header_ops;
         dev->hard_header_len = real_dev->hard_header_len;
     } else {
         dev->header_ops      = &vlan_header_ops;
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
     u32 old;
     struct net_bridge_mdb_htable *mdb;
 
-    spin_lock(&br->multicast_lock);
+    spin_lock_bh(&br->multicast_lock);
     if (!netif_running(br->dev))
         goto unlock;
 

@@ -2030,7 +2030,7 @@ rollback:
     }
 
 unlock:
-    spin_unlock(&br->multicast_lock);
+    spin_unlock_bh(&br->multicast_lock);
 
     return err;
 }
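br->multicast_lock is also taken by the bridge's multicast timers, which run in softirq context; holding it from process context without disabling softirqs invites a same-CPU deadlock, which is what the _bh variants prevent. A standalone userspace analogue of "mask the asynchronous event for the critical section", with SIGALRM standing in for the timer softirq:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t in_critical;

static void timer_handler(int sig)
{
    /* Would deadlock if it tried to take a lock main() already holds. */
    if (in_critical)
        (void)write(1, "timer ran inside critical section!\n", 35);
}

int main(void)
{
    sigset_t block, old;

    signal(SIGALRM, timer_handler);
    sigemptyset(&block);
    sigaddset(&block, SIGALRM);

    /* spin_lock_bh() analogue: mask the async event, then enter. */
    sigprocmask(SIG_BLOCK, &block, &old);
    in_critical = 1;
    raise(SIGALRM);             /* "timer fires" -- stays pending */
    in_critical = 0;
    sigprocmask(SIG_SETMASK, &old, NULL); /* spin_unlock_bh() analogue */

    printf("handler was deferred until the section ended\n");
    return 0;
}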
@@ -1275,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
 
     if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                         skb->len) < 0 &&
-        dev->header_ops->rebuild(skb))
+        dev_rebuild_header(skb))
         return 0;
 
     return dev_queue_xmit(skb);
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                 !vlan_hw_offload_capable(netif_skb_features(skb),
                                          skb->vlan_proto)) {
                 skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-                if (unlikely(!skb))
-                    break;
+                if (unlikely(!skb)) {
+                    /* This is actually a packet drop, but we
+                     * don't want the code at the end of this
+                     * function to try and re-queue a NULL skb.
+                     */
+                    status = NETDEV_TX_OK;
+                    goto unlock_txq;
+                }
                 skb->vlan_tci = 0;
             }
 

@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
             if (status == NETDEV_TX_OK)
                 txq_trans_update(txq);
         }
+    unlock_txq:
         __netif_tx_unlock(txq);
 
         if (status == NETDEV_TX_OK)
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
             hc06_ptr += 3;
         } else {
             /* compress nothing */
-            memcpy(hc06_ptr, &hdr, 4);
+            memcpy(hc06_ptr, hdr, 4);
             /* replace the top byte with new ECN | DSCP format */
             *hc06_ptr = tmp;
             hc06_ptr += 4;
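The 6lowpan hunk fixes a classic slip: `hdr` is already a pointer, so `memcpy(dst, &hdr, 4)` copied four bytes of the pointer value itself rather than the header it points at. A standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char header[4] = { 0x60, 0x00, 0x00, 0x00 }; /* IPv6 ver/TC */
    unsigned char *hdr = header;
    unsigned char out[4];

    memcpy(out, &hdr, 4);   /* BUG: copies the pointer's own bytes */
    printf("wrong: %02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);

    memcpy(out, hdr, 4);    /* FIX: copies the pointed-to bytes */
    printf("right: %02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
    return 0;
}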
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
     netdev_features_t enc_features;
     int ghl = GRE_HEADER_SECTION;
     struct gre_base_hdr *greh;
+    u16 mac_offset = skb->mac_header;
     int mac_len = skb->mac_len;
     __be16 protocol = skb->protocol;
     int tnl_hlen;

@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
     } else
         csum = false;
 
+    if (unlikely(!pskb_may_pull(skb, ghl)))
+        goto out;
+
     /* setup inner skb. */
     skb->protocol = greh->protocol;
     skb->encapsulation = 0;
 
-    if (unlikely(!pskb_may_pull(skb, ghl)))
-        goto out;
-
     __skb_pull(skb, ghl);
     skb_reset_mac_header(skb);
     skb_set_network_header(skb, skb_inner_network_offset(skb));

@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
     /* segment inner packet. */
     enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
     segs = skb_mac_gso_segment(skb, enc_features);
-    if (!segs || IS_ERR(segs))
+    if (!segs || IS_ERR(segs)) {
+        skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
         goto out;
+    }
 
     skb = segs;
     tnl_hlen = skb_tnl_header_len(skb);
@@ -2478,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                        netdev_features_t features)
 {
     struct sk_buff *segs = ERR_PTR(-EINVAL);
+    u16 mac_offset = skb->mac_header;
     int mac_len = skb->mac_len;
     int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
     __be16 protocol = skb->protocol;

@@ -2497,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
     /* segment inner packet. */
     enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
     segs = skb_mac_gso_segment(skb, enc_features);
-    if (!segs || IS_ERR(segs))
+    if (!segs || IS_ERR(segs)) {
+        skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+                             mac_len);
         goto out;
+    }
 
     outer_hlen = skb_tnl_header_len(skb);
     skb = segs;
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 {
     struct sk_buff *segs = ERR_PTR(-EINVAL);
     unsigned int mss;
+    int offset;
+    __wsum csum;
+
+    if (skb->encapsulation &&
+        skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+        segs = skb_udp_tunnel_segment(skb, features);
+        goto out;
+    }
 
     mss = skb_shinfo(skb)->gso_size;
     if (unlikely(skb->len <= mss))

@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
         goto out;
     }
 
+    /* Do software UFO. Complete and fill in the UDP checksum as
+     * HW cannot do checksum of UDP packets sent as multiple
+     * IP fragments.
+     */
+    offset = skb_checksum_start_offset(skb);
+    csum = skb_checksum(skb, offset, skb->len - offset, 0);
+    offset += skb->csum_offset;
+    *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+    skb->ip_summed = CHECKSUM_NONE;
+
     /* Fragment the skb. IP headers of the fragments are updated in
      * inet_gso_segment()
      */
-    if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
-        segs = skb_udp_tunnel_segment(skb, features);
-    else {
-        int offset;
-        __wsum csum;
-
-        /* Do software UFO. Complete and fill in the UDP checksum as
-         * HW cannot do checksum of UDP packets sent as multiple
-         * IP fragments.
-         */
-        offset = skb_checksum_start_offset(skb);
-        csum = skb_checksum(skb, offset, skb->len - offset, 0);
-        offset += skb->csum_offset;
-        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-        skb->ip_summed = CHECKSUM_NONE;
-
-        segs = skb_segment(skb, features);
-    }
+    segs = skb_segment(skb, features);
 out:
     return segs;
 }
@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
|
||||
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
|
||||
{
|
||||
struct in6_addr addr;
|
||||
if (ifp->prefix_len == 127) /* RFC 6164 */
|
||||
if (ifp->prefix_len >= 127) /* RFC 6164 */
|
||||
return;
|
||||
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
|
||||
if (ipv6_addr_any(&addr))
|
||||
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
 	struct in6_addr addr;
-	if (ifp->prefix_len == 127) /* RFC 6164 */
+	if (ifp->prefix_len >= 127) /* RFC 6164 */
 		return;
 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
 	if (ipv6_addr_any(&addr))
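Note: these two one-character changes (the shortlog entry "ipv6: don't install anycast address for /128 addresses on routers") widen the RFC 6164 exemption. RFC 6164 already keeps the subnet-router anycast off /127 point-to-point router links; a /128 has no subnet at all, since its "prefix" is the address itself, so installing the anycast would merely shadow the host's own unicast address. That follows from how the prefix is computed. A userspace paraphrase of ipv6_addr_prefix() from include/net/ipv6.h, a sketch rather than the verbatim kernel helper:

#include <string.h>

/* Copy the top plen bits of addr into pfx and zero the rest.
 * With plen == 128 the result equals addr itself. */
static void addr_prefix(unsigned char pfx[16],
			const unsigned char addr[16], int plen)
{
	int o = plen >> 3;   /* whole bytes covered by the prefix */
	int b = plen & 7;    /* leftover bits in the next byte    */

	memset(pfx, 0, 16);
	memcpy(pfx, addr, o);
	if (b != 0)
		pfx[o] = addr[o] & (0xff00 >> b);
}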
@@ -3456,7 +3456,12 @@ restart:
 			 &inet6_addr_lst[i], addr_lst) {
 			unsigned long age;
 
-			if (ifp->flags & IFA_F_PERMANENT)
+			/* When setting preferred_lft to a value not zero or
+			 * infinity, while valid_lft is infinity
+			 * IFA_F_PERMANENT has a non-infinity life time.
+			 */
+			if ((ifp->flags & IFA_F_PERMANENT) &&
+			    (ifp->prefered_lft == INFINITY_LIFE_TIME))
 				continue;
 
 			spin_lock(&ifp->lock);
@@ -3481,7 +3486,8 @@ restart:
 				ifp->flags |= IFA_F_DEPRECATED;
 			}
 
-			if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+			if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+			    (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
 				next = ifp->tstamp + ifp->valid_lft * HZ;
 
 			spin_unlock(&ifp->lock);
@@ -3761,7 +3767,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
 	put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
 		      ifa->idev->dev->ifindex);
 
-	if (!(ifa->flags&IFA_F_PERMANENT)) {
+	if (!((ifa->flags&IFA_F_PERMANENT) &&
+	      (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
 		preferred = ifa->prefered_lft;
 		valid = ifa->valid_lft;
 		if (preferred != INFINITY_LIFE_TIME) {
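Note: all three addrconf hunks (apparently net/ipv6/addrconf.c) encode one rule: an IFA_F_PERMANENT address is exempt from lifetime processing only while its preferred lifetime is still infinite. Once userspace sets a finite preferred_lft on a permanent address, the aging loop must process it, the next timer must be derived from valid_lft only when that is finite, and the netlink dump must report remaining lifetimes. A hypothetical kernel-style helper, not present in the kernel, capturing the condition the three sites now open-code (the misspelled prefered_lft is the real field name):

/* Hypothetical helper; each call site open-codes this test. */
static bool addr_lifetime_is_static(u32 flags, u32 prefered_lft)
{
	return (flags & IFA_F_PERMANENT) &&
	       prefered_lft == INFINITY_LIFE_TIME;
}

The first site skips such addresses with continue; the third wraps the test in ! to decide whether to emit the lifetime attributes.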
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
 
 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 {
-	struct pcpu_tstats sum = { 0 };
+	struct pcpu_tstats tmp, sum = { 0 };
 	int i;
 
 	for_each_possible_cpu(i) {
+		unsigned int start;
 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
 
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes += tstats->tx_bytes;
+		do {
+			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			tmp.rx_packets = tstats->rx_packets;
+			tmp.rx_bytes = tstats->rx_bytes;
+			tmp.tx_packets = tstats->tx_packets;
+			tmp.tx_bytes = tstats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+		sum.rx_packets += tmp.rx_packets;
+		sum.rx_bytes += tmp.rx_bytes;
+		sum.tx_packets += tmp.tx_packets;
+		sum.tx_bytes += tmp.tx_bytes;
 	}
 	dev->stats.rx_packets = sum.rx_packets;
 	dev->stats.rx_bytes = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 	}
 
 	tstats = this_cpu_ptr(t->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	netif_rx(skb);
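Note: these two hunks (apparently net/ipv6/ip6_tunnel.c) are the reader and writer halves of the u64_stats seqcount protocol. On 32-bit SMP a 64-bit counter is updated in two halves, so an unsynchronized reader can observe a torn value; the writer brackets its increments with u64_stats_update_begin()/end(), and the reader retries its snapshot until the sequence count is stable. The _bh fetch variants used in the diff were the 3.13-era names. A condensed kernel-style sketch of the pairing, assuming a struct with a single counter:

#include <linux/u64_stats_sync.h>

struct rx_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

/* writer: per-CPU hot path (seqcount only matters on 32-bit SMP;
 * on 64-bit these calls compile away) */
static void rx_account(struct rx_stats *s)
{
	u64_stats_update_begin(&s->syncp);   /* seqcount odd: in update  */
	s->packets++;
	u64_stats_update_end(&s->syncp);     /* even again: consistent   */
}

/* reader: retry until no update raced with the snapshot */
static u64 rx_snapshot(const struct rx_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		packets = s->packets;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));

	return packets;
}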
@@ -75,26 +75,6 @@ struct vti6_net {
 	struct ip6_tnl __rcu **tnls[2];
 };
 
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
-	struct pcpu_tstats sum = { 0 };
-	int i;
-
-	for_each_possible_cpu(i) {
-		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
-		sum.rx_packets += tstats->rx_packets;
-		sum.rx_bytes += tstats->rx_bytes;
-		sum.tx_packets += tstats->tx_packets;
-		sum.tx_bytes += tstats->tx_bytes;
-	}
-	dev->stats.rx_packets = sum.rx_packets;
-	dev->stats.rx_bytes = sum.rx_bytes;
-	dev->stats.tx_packets = sum.tx_packets;
-	dev->stats.tx_bytes = sum.tx_bytes;
-	return &dev->stats;
-}
-
 #define for_each_vti6_tunnel_rcu(start) \
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
 	}
 
 	tstats = this_cpu_ptr(t->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	skb->mark = 0;
 	secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
 	.ndo_start_xmit = vti6_tnl_xmit,
 	.ndo_do_ioctl	= vti6_ioctl,
 	.ndo_change_mtu = vti6_change_mtu,
-	.ndo_get_stats	= vti6_get_stats,
+	.ndo_get_stats64 = ip_tunnel_get_stats64,
 };
 
 /**
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 	}
 
 	tstats = this_cpu_ptr(tunnel->dev->tstats);
+	u64_stats_update_begin(&tstats->syncp);
 	tstats->rx_packets++;
 	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
 
 	netif_rx(skb);
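Note: the same begin/end bracketing shown in the sketch above is applied verbatim in vti6_rcv() and ipip6_rcv() (apparently net/ipv6/ip6_vti.c and net/ipv6/sit.c). The ip6_vti hunks go one step further: rather than fixing the open-coded vti6_get_stats() fold, they delete it outright and point .ndo_get_stats64 at the generic ip_tunnel_get_stats64(), which already performs the per-CPU summation under the same syncp protocol.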
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 	unsigned long cpu_flags;
 	size_t copied = 0;
 	u32 peek_seq = 0;
-	u32 *seq;
+	u32 *seq, skb_len;
 	unsigned long used;
 	int target;	/* Read at least this many bytes */
 	long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 			}
 			continue;
 		found_ok_skb:
+			skb_len = skb->len;
 			/* Ok so how much can we use? */
 			used = skb->len - offset;
 			if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
 		}
 
 		/* Partial read */
-		if (used + offset < skb->len)
+		if (used + offset < skb_len)
 			continue;
 	} while (len > 0);
 
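Note: the three af_llc hunks fix the SKB use-after-free called out in the merge message. On the non-PEEK path the skb can be consumed and freed before the final "partial read" test, which then dereferenced skb->len on freed memory; the fix snapshots the length into skb_len while the skb is still live and tests against the snapshot. The bug class in miniature, as a runnable userspace analogue with invented types (not the kernel flow):

#include <stdio.h>
#include <stdlib.h>

struct pkt { size_t len; };

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	size_t used = 10, offset = 0;

	if (!p)
		return 1;
	p->len = 100;
	size_t pkt_len = p->len;   /* snapshot, like skb_len above  */
	free(p);                   /* consume path may free it      */

	/* Testing p->len here would read freed memory; the snapshot
	 * keeps the continuation test valid. */
	if (used + offset < pkt_len)
		puts("partial read, continue");
	return 0;
}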
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 	INIT_LIST_HEAD(&q->retransmit);
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
-
-	q->empty = 1;
 }
 
 /* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 				SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
 			else
 				SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
-			q->empty = 0;
 			break;
 		}
 	} else {
@@ -654,7 +651,6 @@ redo:
 			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
 				chunk->fast_retransmit = SCTP_DONT_FRTX;
 
-			q->empty = 0;
 			q->asoc->stats.rtxchunks++;
 			break;
 		}
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
 			sctp_transport_reset_timers(transport);
 
-			q->empty = 0;
-
 			/* Only let one DATA chunk get bundled with a
 			 * COOKIE-ECHO chunk.
 			 */
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
 		 asoc->adv_peer_ack_point);
 
-	/* See if all chunks are acked.
-	 * Make sure the empty queue handler will get run later.
-	 */
-	q->empty = (list_empty(&q->out_chunk_list) &&
-		    list_empty(&q->retransmit));
-	if (!q->empty)
-		goto finish;
-
-	list_for_each_entry(transport, transport_list, transports) {
-		q->empty = q->empty && list_empty(&transport->transmitted);
-		if (!q->empty)
-			goto finish;
-	}
-
-	pr_debug("%s: sack queue is empty\n", __func__);
-finish:
-	return q->empty;
+	return sctp_outq_is_empty(q);
 }
 
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when we have not pending data, no in-flight data
+ * and nothing pending retransmissions.
+ */
 int sctp_outq_is_empty(const struct sctp_outq *q)
 {
-	return q->empty;
+	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+	       list_empty(&q->retransmit);
 }
 
 /********************************************************************
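Note: the sctp/outqueue hunks all serve one change: the manually maintained q->empty flag, set and cleared at five call sites, is deleted, and sctp_outq_is_empty() now derives the answer from authoritative state (nothing queued, nothing in flight, nothing awaiting retransmission). Deriving beats caching here because a flag touched in many paths can silently drift from the state it summarizes. A miniature, runnable version of the design choice, with an invented type rather than the SCTP structures:

#include <stdbool.h>
#include <stdio.h>

struct outq {
	size_t out_qlen;           /* bytes waiting to be sent     */
	size_t outstanding_bytes;  /* sent but not yet acked       */
	int    retransmit_pending; /* entries on retransmit list   */
};

/* Computed on demand, so it can never go stale the way a cached
 * "empty" flag updated at many call sites could. */
static bool outq_is_empty(const struct outq *q)
{
	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
	       !q->retransmit_pending;
}

int main(void)
{
	struct outq q = { .out_qlen = 0, .outstanding_bytes = 12,
			  .retransmit_pending = 0 };

	printf("%s\n", outq_is_empty(&q) ? "empty" : "busy"); /* busy */
	return 0;
}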