Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix SKB leak in 8139cp, from Dave Jones.

 2) Fix use of *_PAGES interfaces with mlx5 firmware, from Moshe Lazar.

 3) RCU conversion of macvtap introduced two races, fixes by Eric
    Dumazet.

 4) Synchronize statistic flows in bnx2x driver to prevent corruption,
    from Dmitry Kravkov.

 5) Undo optimization in IP tunneling, we were using the inner IP
    header in some cases to inherit the IP ID, but that isn't correct
    in some circumstances.  From Pravin B Shelar.

 6) Use correct struct size when parsing netlink attributes in
    rtnl_bridge_getlink().  From Asbjoern Sloth Toennesen.

 7) Length verifications in tun_get_user() are bogus, from Weiping Pan
    and Dan Carpenter.

 8) Fix bad merge resolution during 3.11 networking development in
    openvswitch, albeit a harmless one which added some unreachable
    code.  From Jesse Gross.

 9) Wrong size used in flexible array allocation in openvswitch, from
    Pravin B Shelar.

10) Clear out firmware capability flags the be2net driver isn't ready
    to handle yet, from Sarveshwar Bandi.

11) Revert DMA mapping error checking addition to cxgb3 driver, it's
    buggy.  From Alexey Kardashevskiy.

12) Fix regression in packet scheduler rate limiting when working with
    a link layer of ATM.  From Jesper Dangaard Brouer.

13) Fix several errors in TCP Cubic congestion control, in particular
    overflow errors in timestamp calculations.  From Eric Dumazet and
    Van Jacobson.

14) In ipv6 routing lookups, we need to backtrack if subtree traversal
    doesn't result in a match.  From Hannes Frederic Sowa.

15) ipgre_header() returns incorrect packet offset.  Fix from Timo
    Teräs.

16) Get "low latency" out of the new MIB counter names.  From Eliezer
    Tamir.

17) State check in ndo_dflt_fdb_del() is inverted, from Sridhar
    Samudrala.

18) Handle TCP Fast Open properly in netfilter conntrack, from Yuchung
    Cheng.

19) Wrong memcpy length in pcan_usb driver, from Stephane Grosjean.

20) Fix deadlock in TIPC, from Wang Weidong and Ding Tianhong.

21) call_rcu() call to destroy SCTP transport is done too early and
    might result in an oops.  From Daniel Borkmann.

22) Fix races in genetlink family dumps, from Johannes Berg.

23) Flags passed into macvlan by the user need to be validated
    properly, from Michael S Tsirkin.

24) Fix skge build on 32-bit, from Stephen Hemminger.

25) Handle malformed TCP headers properly in xt_TCPMSS, from Pablo
    Neira Ayuso.

26) Fix handling of stacked vlans in vlan_dev_real_dev(), from Nikolay
    Aleksandrov.

27) Eliminate MTU calculation overflows in esp{4,6}, from Daniel
    Borkmann.

28) neigh_parms need to be setup before calling the ->ndo_neigh_setup()
    method.  From Veaceslav Falico.

29) Kill out-of-bounds prefetch in fib_trie, from Eric Dumazet.

30) Don't dereference the MLD query message if the length isn't valid
    in the bridge multicast code, from Linus Lüssing.

31) Fix VXLAN IGMP join regression due to an inverted check, from Cong
    Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (70 commits)
  net/mlx5_core: Support MANAGE_PAGES and QUERY_PAGES firmware command changes
  tun: signedness bug in tun_get_user()
  qlcnic: Fix diagnostic interrupt test for 83xx adapters
  qlcnic: Fix beacon state return status handling
  qlcnic: Fix set driver version command
  net: tg3: fix NULL pointer dereference in tg3_io_error_detected and tg3_io_slot_reset
  net_sched: restore "linklayer atm" handling
  drivers/net/ethernet/via/via-velocity.c: update napi implementation
  Revert "cxgb3: Check and handle the dma mapping errors"
  be2net: Clear any capability flags that driver is not interested in.
  openvswitch: Reset tunnel key between input and output.
  openvswitch: Use correct type while allocating flex array.
  openvswitch: Fix bad merge resolution.
  tun: compare with 0 instead of total_len
  rtnetlink: rtnl_bridge_getlink: Call nlmsg_find_attr() with ifinfomsg header
  ethernet/arc/arc_emac - fix NAPI "work > weight" warning
  ip_tunnel: Do not use inner ip-header-id for tunnel ip-header-id.
  bnx2x: prevent crash in shutdown flow with CNIC
  bnx2x: fix PTE write access error
  bnx2x: fix memory leak in VF
  ...
commit ddea368c78
@@ -5581,9 +5581,9 @@ S:	Maintained
 F:	drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 M:	Andrew Gallatin <gallatin@myri.com>
 M:	Hyong-Youb Kim <hykim@myri.com>
 L:	netdev@vger.kernel.org
-W:	http://www.myri.com/scs/download-Myri10GE.html
+W:	https://www.myricom.com/support/downloads/myri10ge.html
 S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n)
  * The bonding ndo_neigh_setup is called at init time beofre any
  * slave exists. So we must declare proxy setup function which will
  * be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
  */
 static int bond_neigh_setup(struct net_device *dev,
 			    struct neigh_parms *parms)
 {
-	parms->neigh_setup = bond_neigh_init;
+	/* modify only our neigh_parms */
+	if (parms->dev == dev)
+		parms->neigh_setup = bond_neigh_init;
 
 	return 0;
 }
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
 		if ((mc->ptr + rec_len) > mc->end)
 			goto decode_failed;
 
-		memcpy(cf->data, mc->ptr, rec_len);
+		memcpy(cf->data, mc->ptr, cf->can_dlc);
 		mc->ptr += rec_len;
 	}
 
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 	struct arc_emac_priv *priv = netdev_priv(ndev);
 	unsigned int work_done;
 
-	for (work_done = 0; work_done <= budget; work_done++) {
+	for (work_done = 0; work_done < budget; work_done++) {
 		unsigned int *last_rx_bd = &priv->last_rx_bd;
 		struct net_device_stats *stats = &priv->stats;
 		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
@@ -1502,6 +1502,7 @@ struct bnx2x {
 #define BC_SUPPORTS_DCBX_MSG_NON_PMF	(1 << 21)
 #define IS_VF_FLAG			(1 << 22)
 #define INTERRUPTS_ENABLED_FLAG		(1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD		(1 << 24)
 
 #define BP_NOMCP(bp)			((bp)->flags & NO_MCP_FLAG)
 
@@ -1830,6 +1831,8 @@ struct bnx2x {
 
 	int fp_array_size;
 	u32 dump_preset_idx;
+	bool stats_started;
+	struct semaphore stats_sema;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed {
 	BNX2X_PCI_LINK_SPEED_5000 = 5000,
 	BNX2X_PCI_LINK_SPEED_8000 = 8000
 };
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
 #endif /* bnx2x.h */
@@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		bnx2x_pfc_set_pfc(bp);
 
 		bnx2x_dcbx_update_ets_params(bp);
+
+		/* ets may affect cmng configuration: reinit it in hw */
+		bnx2x_set_local_cmng(bp);
+
 		bnx2x_dcbx_resume_hw_tx(bp);
 
 		return;
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
 
 	#define DRV_MSG_CODE_EEE_RESULTS_ACK		0xda000000
 
+	#define DRV_MSG_CODE_RMMOD			0xdb000000
+	#define REQ_BC_VER_4_RMMOD_CMD			0x0007080f
+
 	#define DRV_MSG_CODE_SET_MF_BW			0xe0000000
 	#define REQ_BC_VER_4_SET_MF_BW			0x00060202
 	#define DRV_MSG_CODE_SET_MF_BW_ACK		0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
 
 	#define FW_MSG_CODE_EEE_RESULS_ACK		0xda100000
 
+	#define FW_MSG_CODE_RMMOD_ACK			0xdb100000
+
 	#define FW_MSG_CODE_SET_MF_BW_SENT		0xe0000000
 	#define FW_MSG_CODE_SET_MF_BW_DONE		0xe1000000
 
@@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
 
 	input.port_rate = bp->link_vars.line_speed;
 
-	if (cmng_type == CMNG_FNS_MINMAX) {
+	if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
 		int vn;
 
 		/* read mf conf from shmem */
@@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
 	}
 }
 
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+	int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+	if (cmng_fns != CMNG_FNS_NONE) {
+		bnx2x_cmng_fns_init(bp, false, cmng_fns);
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+	} else {
+		/* rate shaping and fairness are disabled */
+		DP(NETIF_MSG_IFUP,
+		   "single function mode without fairness\n");
+	}
+}
+
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
@@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
-	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
-		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
-		if (cmng_fns != CMNG_FNS_NONE) {
-			bnx2x_cmng_fns_init(bp, false, cmng_fns);
-			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
-		} else
-			/* rate shaping and fairness are disabled */
-			DP(NETIF_MSG_IFUP,
-			   "single function mode without fairness\n");
-	}
+	if (bp->link_vars.link_up && bp->link_vars.line_speed)
+		bnx2x_set_local_cmng(bp);
 
 	__bnx2x_link_report(bp);
 
@@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
 
 	bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
 			BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+	bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+			BC_SUPPORTS_RMMOD_CMD : 0;
+
 	boot_mode = SHMEM_RD(bp,
 			dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
 			PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	spin_lock_init(&bp->stats_lock);
+	sema_init(&bp->stats_sema, 1);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 	bnx2x_dcbnl_update_applist(bp, true);
 #endif
 
+	if (IS_PF(bp) &&
+	    !BP_NOMCP(bp) &&
+	    (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
 	/* Close the interface - either directly or implicitly */
 	if (remove_netdev) {
 		unregister_netdev(dev);
 	} else {
 		rtnl_lock();
-		if (netif_running(dev))
-			bnx2x_close(dev);
+		dev_close(dev);
 		rtnl_unlock();
 	}
 
@@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 alloc_mem_err:
 	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
 		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
 		       sizeof(union pf_vf_bulletin));
 	return -ENOMEM;
 }
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
  * Statistics service functions
  */
 
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
 {
 	struct dmae_command *dmae;
 	u32 opcode;
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 	*stats_comp = 0;
 }
 
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
 {
 	/* vfs travel through here as part of the statistics FSM, but no action
 	 * is required
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp)
 
 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+	bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
 {
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_pmf_update(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_pmf_update(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	__bnx2x_stats_pmf_update(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
 	 */
 	if (IS_VF(bp))
 		return;
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
 	bnx2x_stats_comp(bp);
-	bnx2x_stats_start(bp);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
 	/* Make sure we use the value of the counter
 	 * used for sending the last stats ramrod.
 	 */
-	spin_lock_bh(&bp->stats_lock);
 	cur_stats_counter = bp->stats_counter - 1;
-	spin_unlock_bh(&bp->stats_lock);
 
 	/* are storm stats valid? */
 	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 {
 	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
 
-	if (bnx2x_edebug_stats_stopped(bp))
+	/* we run update from timer context, so give up
+	 * if somebody is in the middle of transition
+	 */
+	if (down_trylock(&bp->stats_sema))
 		return;
 
+	if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+		goto out;
+
 	if (IS_PF(bp)) {
 		if (*stats_comp != DMAE_COMP_VAL)
-			return;
+			goto out;
 
 		if (bp->port.pmf)
 			bnx2x_hw_stats_update(bp);
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 			BNX2X_ERR("storm stats were not updated for 3 times\n");
 			bnx2x_panic();
 		}
-			return;
+			goto out;
 		}
 	} else {
 		/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	/* vf is done */
 	if (IS_VF(bp))
-		return;
+		goto out;
 
 	if (netif_msg_timer(bp)) {
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 
 	bnx2x_hw_stats_post(bp);
 	bnx2x_storm_stats_post(bp);
+
+out:
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 {
 	int update = 0;
 
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+
+	bp->stats_started = false;
+
 	bnx2x_stats_comp(bp);
 
 	if (bp->port.pmf)
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
 		bnx2x_hw_stats_post(bp);
 		bnx2x_stats_comp(bp);
 	}
+
+	up(&bp->stats_sema);
 }
 
 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1416,17 @@ static const struct {
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state;
+	void (*action)(struct bnx2x *bp);
 	if (unlikely(bp->panic))
 		return;
 
 	spin_lock_bh(&bp->stats_lock);
 	state = bp->stats_state;
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+	action = bnx2x_stats_stm[state][event].action;
 	spin_unlock_bh(&bp->stats_lock);
 
-	bnx2x_stats_stm[state][event].action(bp);
+	action(bp);
 
 	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
 		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
 done:
 	if (state == pci_channel_io_perm_failure) {
-		tg3_napi_enable(tp);
-		dev_close(netdev);
+		if (netdev) {
+			tg3_napi_enable(tp);
+			dev_close(netdev);
+		}
 		err = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_disable_device(pdev);
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rtnl_lock();
 
 	if (pci_enable_device(pdev)) {
-		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
 		goto done;
 	}
 
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	pci_save_state(pdev);
 
-	if (!netif_running(netdev)) {
+	if (!netdev || !netif_running(netdev)) {
 		rc = PCI_ERS_RESULT_RECOVERED;
 		goto done;
 	}
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
 	rc = PCI_ERS_RESULT_RECOVERED;
 
 done:
-	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
 		tg3_napi_enable(tp);
 		dev_close(netdev);
 	}
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
-			__free_pages(q->pg_chunk.page, order);
-			q->pg_chunk.page = NULL;
-			return -EIO;
-		}
 		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
 	return flits_to_desc(flits);
 }
 
-
-/* map_skb - map a packet main body and its page fragments
- * @pdev: the PCI device
- * @skb: the packet
- * @addr: placeholder to save the mapped addresses
- *
- * map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
-		   dma_addr_t *addr)
-{
-	const skb_frag_t *fp, *end;
-	const struct skb_shared_info *si;
-
-	*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
-			       PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, *addr))
-		goto out_err;
-
-	si = skb_shinfo(skb);
-	end = &si->frags[si->nr_frags];
-
-	for (fp = si->frags; fp < end; fp++) {
-		*++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
-					   DMA_TO_DEVICE);
-		if (pci_dma_mapping_error(pdev, *addr))
-			goto unwind;
-	}
-	return 0;
-
-unwind:
-	while (fp-- > si->frags)
-		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
-			       DMA_TO_DEVICE);
-
-	pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
-	return -ENOMEM;
-}
-
 /**
- *	write_sgl - populate a scatter/gather list for a packet
+ *	make_sgl - populate a scatter/gather list for a packet
  *	@skb: the packet
  *	@sgp: the SGL to populate
  *	@start: start address of skb main body data to include in the SGL
  *	@len: length of skb main body data to include in the SGL
- *	@addr: the list of the mapped addresses
+ *	@pdev: the PCI device
  *
- *	Copies the scatter/gather list for the buffers that make up a packet
+ *	Generates a scatter/gather list for the buffers that make up a packet
  *	and returns the SGL size in 8-byte words. The caller must size the SGL
  *	appropriately.
  */
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
 				     struct sg_ent *sgp, unsigned char *start,
-				     unsigned int len, const dma_addr_t *addr)
+				     unsigned int len, struct pci_dev *pdev)
 {
-	unsigned int i, j = 0, k = 0, nfrags;
+	dma_addr_t mapping;
+	unsigned int i, j = 0, nfrags;
 
 	if (len) {
+		mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
 		sgp->len[0] = cpu_to_be32(len);
-		sgp->addr[j++] = cpu_to_be64(addr[k++]);
+		sgp->addr[0] = cpu_to_be64(mapping);
+		j = 1;
 	}
 
 	nfrags = skb_shinfo(skb)->nr_frags;
 	for (i = 0; i < nfrags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+					   DMA_TO_DEVICE);
 		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
-		sgp->addr[j] = cpu_to_be64(addr[k++]);
+		sgp->addr[j] = cpu_to_be64(mapping);
 		j ^= 1;
 		if (j == 0)
 			++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 			    const struct port_info *pi,
 			    unsigned int pidx, unsigned int gen,
 			    struct sge_txq *q, unsigned int ndesc,
-			    unsigned int compl, const dma_addr_t *addr)
+			    unsigned int compl)
 {
 	unsigned int flits, sgl_flits, cntrl, tso_info;
 	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 	}
 
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
 
 	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
 			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *txq;
 	struct sge_qset *qs;
 	struct sge_txq *q;
-	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 
 	/*
 	 * The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
-		dev_kfree_skb(skb);
-		return NETDEV_TX_OK;
-	}
-
 	q->in_use += ndesc;
 	if (unlikely(credits - ndesc < q->stop_thres)) {
 		t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);
 
-	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
 	check_ring_tx_db(adap, q);
 	return NETDEV_TX_OK;
 }
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
  */
 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 			  struct sge_txq *q, unsigned int pidx,
-			  unsigned int gen, unsigned int ndesc,
-			  const dma_addr_t *addr)
+			  unsigned int gen, unsigned int ndesc)
 {
 	unsigned int sgl_flits, flits;
 	struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
 
 	flits = skb_transport_offset(skb) / 8;
 	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
-	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
-			      skb_tail_pointer(skb) -
-			      skb_transport_header(skb), addr);
+	sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+			     skb->tail - skb->transport_header,
+			     adap->pdev);
 	if (need_skb_unmap()) {
 		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
 		skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		goto again;
 	}
 
-	if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
-		spin_unlock(&q->lock);
-		return NET_XMIT_SUCCESS;
-	}
-
 	gen = q->gen;
 	q->in_use += ndesc;
 	pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	}
 	spin_unlock(&q->lock);
 
-	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+	write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 	check_ring_tx_db(adap, q);
 	return NET_XMIT_SUCCESS;
 }
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
 	struct sge_txq *q = &qs->txq[TXQ_OFLD];
 	const struct port_info *pi = netdev_priv(qs->netdev);
 	struct adapter *adap = pi->adapter;
-	unsigned int written = 0;
 
 	spin_lock(&q->lock);
 again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 			break;
 		}
 
-		if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
-			break;
-
 		gen = q->gen;
 		q->in_use += ndesc;
 		pidx = q->pidx;
 		q->pidx += ndesc;
-		written += ndesc;
 		if (q->pidx >= q->size) {
 			q->pidx -= q->size;
 			q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 		__skb_unlink(skb, &q->sendq);
 		spin_unlock(&q->lock);
 
-		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
-			      (dma_addr_t *)skb->head);
+		write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
 		spin_lock(&q->lock);
 	}
 	spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again:	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
 	set_bit(TXQ_LAST_PKT_DB, &q->flags);
 #endif
 	wmb();
-	if (likely(written))
-		t3_write_reg(adap, A_SG_KDOORBELL,
-			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+	t3_write_reg(adap, A_SG_KDOORBELL,
+		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
 }
 
 /**
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
 
 		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
 		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+
+		/* Clear flags that driver is not interested in */
+		adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
 	}
 err:
 	mutex_unlock(&adapter->mbox_lock);
@@ -563,6 +563,12 @@ enum be_if_flags {
 	BE_IF_FLAGS_MULTICAST = 0x1000
 };
 
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+			 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+			 BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+			 BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+			 BE_IF_FLAGS_UNTAGGED)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-			  struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			 struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	u64 map;
+	dma_addr_t map;
 
 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
 
-	rd->dma_lo = map;
-	rd->dma_hi = map >> 32;
+	if (pci_dma_mapping_error(skge->hw->pdev, map))
+		return -1;
+
+	rd->dma_lo = lower_32_bits(map);
+	rd->dma_hi = upper_32_bits(map);
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
+	return 0;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+		if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(skb);
+			return -EIO;
+		}
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
 
 	BUG_ON(skge->dma & 7);
 
-	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+	if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
 		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
 		err = -EINVAL;
 		goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	u64 map;
+	dma_addr_t map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(hw->pdev, map))
+		goto mapping_error;
+
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = map;
-	td->dma_hi = map >> 32;
+	td->dma_lo = lower_32_bits(map);
+	td->dma_hi = upper_32_bits(map);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
 					       skb_frag_size(frag), DMA_TO_DEVICE);
+			if (dma_mapping_error(&hw->pdev->dev, map))
+				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = map;
-			tf->dma_hi = (u64) map >> 32;
+			tf->dma_lo = lower_32_bits(map);
+			tf->dma_hi = upper_32_bits(map);
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	}
 
 	return NETDEV_TX_OK;
+
+mapping_unwind:
+	e = skge->tx_ring.to_use;
+	pci_unmap_single(hw->pdev,
+			 dma_unmap_addr(e, mapaddr),
+			 dma_unmap_len(e, maplen),
+			 PCI_DMA_TODEVICE);
+	while (i-- > 0) {
+		e = e->next;
+		pci_unmap_page(hw->pdev,
+			       dma_unmap_addr(e, mapaddr),
+			       dma_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
+	}
+
+mapping_error:
+	if (net_ratelimit())
+		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
 }
 
 
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 
 		pci_dma_sync_single_for_cpu(skge->hw->pdev,
 					    dma_unmap_addr(e, mapaddr),
-					    len, PCI_DMA_FROMDEVICE);
+					    dma_unmap_len(e, maplen),
+					    PCI_DMA_FROMDEVICE);
 		skb_copy_from_linear_data(e->skb, skb->data, len);
 		pci_dma_sync_single_for_device(skge->hw->pdev,
 					       dma_unmap_addr(e, mapaddr),
-					       len, PCI_DMA_FROMDEVICE);
+					       dma_unmap_len(e, maplen),
+					       PCI_DMA_FROMDEVICE);
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
+		if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+			dev_kfree_skb(nskb);
+			goto resubmit;
+		}
+
 		pci_unmap_single(skge->hw->pdev,
 				 dma_unmap_addr(e, mapaddr),
 				 dma_unmap_len(e, maplen),
 				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
-		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
@@ -46,7 +46,7 @@
 #include "mlx5_core.h"
 
 enum {
-	CMD_IF_REV = 4,
+	CMD_IF_REV = 5,
 };
 
 enum {
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		case MLX5_EVENT_TYPE_PAGE_REQUEST:
 			{
 				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 
 				mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
 				mlx5_core_req_pages_handler(dev, func_id, npages);
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
 	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
 	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
 	caps->log_max_mcg = out->hca_cap.log_max_mcg;
-	caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
 	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
 	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
 	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
@@ -55,33 +55,9 @@ enum {
 };
 
 static DEFINE_SPINLOCK(health_lock);
-
 static LIST_HEAD(health_list);
 static struct work_struct health_work;
 
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
-	spin_lock_irq(&health_lock);
-	if (reg_handler) {
-		spin_unlock_irq(&health_lock);
-		return -EEXIST;
-	}
-	reg_handler = handler;
-	spin_unlock_irq(&health_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
-	spin_lock_irq(&health_lock);
-	reg_handler = NULL;
-	spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
 static void health_care(struct work_struct *work)
 {
 	struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
 		priv = container_of(health, struct mlx5_priv, health);
 		dev = container_of(priv, struct mlx5_core_dev, priv);
 		mlx5_core_warn(dev, "handling bad device here\n");
+		/* nothing yet */
 		spin_lock_irq(&health_lock);
-		if (reg_handler)
-			reg_handler(dev->pdev, health->health,
-				    sizeof(health->health));
-
 		list_del_init(&health->list);
 		spin_unlock_irq(&health_lock);
 	}
@@ -43,10 +43,16 @@ enum {
 	MLX5_PAGES_TAKE		= 2
 };
 
+enum {
+	MLX5_BOOT_PAGES		= 1,
+	MLX5_INIT_PAGES		= 2,
+	MLX5_POST_INIT_PAGES	= 3
+};
+
 struct mlx5_pages_req {
 	struct mlx5_core_dev *dev;
 	u32	func_id;
-	s16	npages;
+	s32	npages;
 	struct work_struct work;
 };
 
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
 
 struct mlx5_query_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	__be16			num_boot_pages;
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			init_pages;
-	__be16			num_pages;
+	__be32			num_pages;
 };
 
 struct mlx5_manage_pages_inbox {
 	struct mlx5_inbox_hdr	hdr;
-	__be16			rsvd0;
+	__be16			rsvd;
 	__be16			func_id;
-	__be16			rsvd1;
-	__be16			num_entries;
-	u8			rsvd2[16];
+	__be32			num_entries;
 	__be64			pas[0];
 };
 
 struct mlx5_manage_pages_outbox {
 	struct mlx5_outbox_hdr	hdr;
-	u8			rsvd0[2];
-	__be16			num_entries;
-	u8			rsvd1[20];
+	__be32			num_entries;
+	u8			rsvd[4];
 	__be64			pas[0];
 };
 
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
 }
 
 static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s16 *pages, s16 *init_pages, u16 *boot_pages)
+				s32 *npages, int boot)
 {
 	struct mlx5_query_pages_inbox	in;
 	struct mlx5_query_pages_outbox	out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+	in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 	if (err)
 		return err;
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
 	if (out.hdr.status)
 		return mlx5_cmd_status_to_err(&out.hdr);
 
-	if (pages)
-		*pages = be16_to_cpu(out.num_pages);
-
-	if (init_pages)
-		*init_pages = be16_to_cpu(out.init_pages);
-
-	if (boot_pages)
-		*boot_pages = be16_to_cpu(out.num_boot_pages);
-
+	*npages = be32_to_cpu(out.num_pages);
 	*func_id = be16_to_cpu(out.func_id);
 
 	return err;
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
 	in->func_id = cpu_to_be16(func_id);
-	in->num_entries = cpu_to_be16(npages);
+	in->num_entries = cpu_to_be32(npages);
 	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
 	mlx5_core_dbg(dev, "err %d\n", err);
 	if (err) {
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
 	in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
 	in.func_id = cpu_to_be16(func_id);
-	in.num_entries = cpu_to_be16(npages);
+	in.num_entries = cpu_to_be32(npages);
 	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
 	if (err) {
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 		goto out_free;
 	}
 
-	num_claimed = be16_to_cpu(out->num_entries);
+	num_claimed = be32_to_cpu(out->num_entries);
 	if (nclaimed)
 		*nclaimed = num_claimed;
 
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
 }
 
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages)
+				 s32 npages)
 {
 	struct mlx5_pages_req *req;
 
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
 
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
 {
-	u16 uninitialized_var(boot_pages);
-	s16 uninitialized_var(init_pages);
 	u16 uninitialized_var(func_id);
+	s32 uninitialized_var(npages);
 	int err;
 
-	err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages,
-				   &boot_pages);
+	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
 	if (err)
 		return err;
 
-	mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n",
-		      init_pages, boot_pages, func_id);
+	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+		      npages, boot ? "boot" : "init", func_id);
 
-	return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0);
+	return give_pages(dev, func_id, npages, 0);
 }
 
 static int optimal_reclaimed_pages(void)
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 	u8 val;
 	int ret, max_sds_rings = adapter->max_sds_rings;
 
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+		netdev_info(netdev, "Device is resetting\n");
+		return -EBUSY;
+	}
+
 	if (qlcnic_get_diag_lock(adapter)) {
 		netdev_info(netdev, "Device in diagnostics mode\n");
 		return -EBUSY;
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
 		return -EIO;
 	}
 
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 	qlcnic_83xx_idc_attach_driver(adapter);
 
 	return 0;
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_disable_mbx_intr;
 
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 
 	pci_set_drvdata(pdev, adapter);
 
@@ -3085,7 +3086,8 @@ done:
 	adapter->fw_fail_cnt = 0;
 	adapter->flags &= ~QLCNIC_FW_HANG;
 	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	qlcnic_set_drv_version(adapter);
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
 
 	if (!qlcnic_clr_drv_state(adapter))
 		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
 
 	if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
 		err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
-		if (!err) {
-			dev_info(&adapter->pdev->dev,
-				 "Failed to get current beacon state\n");
+		if (err) {
+			netdev_err(adapter->netdev,
+				   "Failed to get current beacon state\n");
 		} else {
 			if (h_beacon_state == QLCNIC_BEACON_DISABLE)
 				ahw->beacon_state = 0;
@@ -524,6 +524,7 @@ rx_status_loop:
 					 PCI_DMA_FROMDEVICE);
 		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
 			dev->stats.rx_dropped++;
+			kfree_skb(new_skb);
 			goto rx_next;
 		}
 
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 	struct stmmac_priv *priv = (struct stmmac_priv *)p;
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int entry = priv->cur_tx % txsize;
-	struct dma_desc *desc = priv->dma_tx + entry;
+	struct dma_desc *desc;
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int bmax, len;
 
+	if (priv->extend_desc)
+		desc = (struct dma_desc *)(priv->dma_etx + entry);
+	else
+		desc = priv->dma_tx + entry;
+
 	if (priv->plat->enh_desc)
 		bmax = BUF_SIZE_8KiB;
 	else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
 						STMMAC_RING_MODE);
 		wmb();
 		entry = (++priv->cur_tx) % txsize;
-		desc = priv->dma_tx + entry;
+
+		if (priv->extend_desc)
+			desc = (struct dma_desc *)(priv->dma_etx + entry);
+		else
+			desc = priv->dma_tx + entry;
 
 		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
 					    len, DMA_TO_DEVICE);
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
 				 GFP_KERNEL);
-	if (unlikely(skb == NULL)) {
+	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
-		return 1;
+		return -ENOMEM;
 	}
 	skb_reserve(skb, NET_IP_ALIGN);
 	priv->rx_skbuff[i] = skb;
 	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
+	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+		pr_err("%s: DMA mapping error\n", __func__);
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
 
 	p->des2 = priv->rx_skbuff_dma[i];
 
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	return 0;
 }
 
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+	if (priv->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+				 priv->dma_buf_sz, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(priv->rx_skbuff[i]);
+	}
+	priv->rx_skbuff[i] = NULL;
+}
+
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
 	unsigned int txsize = priv->dma_tx_size;
 	unsigned int rxsize = priv->dma_rx_size;
 	unsigned int bfsize = 0;
+	int ret = -ENOMEM;
 
 	/* Set the max buffer size according to the DESC mode
 	 * and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static int init_dma_desc_rings(struct net_device *dev)
 							   dma_extended_desc),
 						  &priv->dma_rx_phy,
 						  GFP_KERNEL);
+		if (!priv->dma_erx)
+			goto err_dma;
+
 		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
 						   sizeof(struct
 							  dma_extended_desc),
 						  &priv->dma_tx_phy,
 						  GFP_KERNEL);
-		if ((!priv->dma_erx) || (!priv->dma_etx))
-			return;
+		if (!priv->dma_etx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_extended_desc),
+					  priv->dma_erx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	} else {
 		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_rx_phy,
 						  GFP_KERNEL);
+		if (!priv->dma_rx)
+			goto err_dma;
+
 		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
 						  sizeof(struct dma_desc),
 						  &priv->dma_tx_phy,
 						  GFP_KERNEL);
-		if ((!priv->dma_rx) || (!priv->dma_tx))
-			return;
+		if (!priv->dma_tx) {
+			dma_free_coherent(priv->device, priv->dma_rx_size *
+					  sizeof(struct dma_desc),
+					  priv->dma_rx, priv->dma_rx_phy);
+			goto err_dma;
+		}
 	}
 
 	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->rx_skbuff_dma)
+		goto err_rx_skbuff_dma;
+
 	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->rx_skbuff)
+		goto err_rx_skbuff;
+
 	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
 					    GFP_KERNEL);
+	if (!priv->tx_skbuff_dma)
+		goto err_tx_skbuff_dma;
+
 	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
 					GFP_KERNEL);
+	if (!priv->tx_skbuff)
+		goto err_tx_skbuff;
+
 	if (netif_msg_probe(priv)) {
 		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		if (stmmac_init_rx_buffers(priv, p, i))
-			break;
+		ret = stmmac_init_rx_buffers(priv, p, i);
+		if (ret)
+			goto err_init_rx_buffers;
 
 		if (netif_msg_probe(priv))
 			pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static int init_dma_desc_rings(struct net_device *dev)
 
 	if (netif_msg_hw(priv))
 		stmmac_display_rings(priv);
+
+	return 0;
+err_init_rx_buffers:
+	while (--i >= 0)
+		stmmac_free_rx_buffers(priv, i);
+	kfree(priv->tx_skbuff);
+err_tx_skbuff:
+	kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+	kfree(priv->rx_skbuff);
+err_rx_skbuff:
+	kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+	if (priv->extend_desc) {
+		dma_free_coherent(priv->device, priv->dma_tx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_etx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device, priv->dma_rx_size *
+				  sizeof(struct dma_extended_desc),
+				  priv->dma_erx, priv->dma_rx_phy);
+	} else {
+		dma_free_coherent(priv->device,
+				  priv->dma_tx_size * sizeof(struct dma_desc),
+				  priv->dma_tx, priv->dma_tx_phy);
+		dma_free_coherent(priv->device,
+				  priv->dma_rx_size * sizeof(struct dma_desc),
+				  priv->dma_rx, priv->dma_rx_phy);
+	}
+err_dma:
+	return ret;
 }
 
 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->dma_rx_size; i++) {
-		if (priv->rx_skbuff[i]) {
-			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
-					 priv->dma_buf_sz, DMA_FROM_DEVICE);
-			dev_kfree_skb_any(priv->rx_skbuff[i]);
-		}
-		priv->rx_skbuff[i] = NULL;
-	}
+	for (i = 0; i < priv->dma_rx_size; i++)
+		stmmac_free_rx_buffers(priv, i);
 }
 
 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev)
 	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
 	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
 	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
-	init_dma_desc_rings(dev);
+
+	ret = init_dma_desc_rings(dev);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto dma_desc_error;
+	}
 
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
-		pr_err("%s: DMA initialization failed\n", __func__);
+		pr_err("%s: DMA engine initialization failed\n", __func__);
 		goto init_error;
 	}
 
@@ -1672,6 +1744,7 @@ wolirq_error:
 
 init_error:
 	free_dma_desc_resources(priv);
+dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 phy_error:
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
 
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 	}
-	netif_rx(skb);
+	netif_receive_skb(skb);
 
 	stats->rx_bytes += pkt_len;
 	stats->rx_packets++;
@@ -2884,6 +2884,7 @@ out:
 	return ret;
 
 err_iounmap:
+	netif_napi_del(&vptr->napi);
 	iounmap(regs);
 err_free_dev:
 	free_netdev(netdev);
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev)
 	struct velocity_info *vptr = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
+	netif_napi_del(&vptr->napi);
 	iounmap(vptr->mac_regs);
 	free_netdev(netdev);
 	velocity_nics--;
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
 			return -EADDRNOTAVAIL;
 	}
 
+	if (data && data[IFLA_MACVLAN_FLAGS] &&
+	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+		return -EINVAL;
+
 	if (data && data[IFLA_MACVLAN_MODE]) {
 		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
 		case MACVLAN_MODE_PRIVATE:
@@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
 	}
-	if (vlan)
+	if (vlan) {
+		local_bh_disable();
 		macvlan_start_xmit(skb, vlan->dev);
-	else
+		local_bh_enable();
+	} else {
 		kfree_skb(skb);
+	}
 	rcu_read_unlock();
 
 	return total_len;
@@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 done:
 	rcu_read_lock();
 	vlan = rcu_dereference(q->vlan);
-	if (vlan)
+	if (vlan) {
+		preempt_disable();
 		macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+		preempt_enable();
+	}
 	rcu_read_unlock();
 
 	return ret ? ret : copied;
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	u32 rxhash;
 
 	if (!(tun->flags & TUN_NO_PI)) {
-		if ((len -= sizeof(pi)) > total_len)
+		if (len < sizeof(pi))
 			return -EINVAL;
+		len -= sizeof(pi);
 
 		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
 			return -EFAULT;
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (tun->flags & TUN_VNET_HDR) {
-		if ((len -= tun->vnet_hdr_sz) > total_len)
+		if (len < tun->vnet_hdr_sz)
 			return -EINVAL;
+		len -= tun->vnet_hdr_sz;
 
 		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
 			return -EFAULT;
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev)
 		return -ENOTCONN;
 
 	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) &&
-	    ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
+	    vxlan_group_used(vn, vxlan->default_dst.remote_ip)) {
 		vxlan_sock_hold(vs);
 		dev_hold(dev);
 		queue_work(vxlan_wq, &vxlan->igmp_join);
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
-	flush_workqueue(vxlan_wq);
-
 	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vxlan->hlist);
 	spin_unlock(&vn->sock_lock);
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
 	if (!priv->join_status)
 		goto done;
 
-	if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
-		wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
-			  priv->join_status);
-		BUG_ON(1);
-	}
+	if (priv->join_status == CW1200_JOIN_STATUS_AP)
+		goto done;
 
 	cancel_work_sync(&priv->update_filtering_work);
 	cancel_work_sync(&priv->set_beacon_wakeup_period_work);
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il)
 		 * is killed. Hence update the killswitch state here. The
 		 * rfkill handler will care about restarting if needed.
 		 */
 		if (!test_bit(S_ALIVE, &il->status)) {
-			if (hw_rf_kill)
-				set_bit(S_RFKILL, &il->status);
-			else
-				clear_bit(S_RFKILL, &il->status);
+			if (hw_rf_kill) {
+				set_bit(S_RFKILL, &il->status);
+			} else {
+				clear_bit(S_RFKILL, &il->status);
+				wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+				il_force_reset(il, true);
+			}
 		}
 
 		handled |= CSR_INT_BIT_RF_KILL;
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il)
 
 	il->active_rate = RATES_MASK;
 
+	il_power_update_mode(il, true);
+	D_INFO("Updated power mode\n");
+
 	if (il_is_associated(il)) {
 		struct il_rxon_cmd *active_rxon =
 		    (struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il)
 	D_INFO("ALIVE processing complete.\n");
 	wake_up(&il->wait_command_queue);
 
-	il_power_update_mode(il, true);
-	D_INFO("Updated power mode\n");
-
 	return;
 
 restart:
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
 
 	return 0;
 }
+EXPORT_SYMBOL(il_force_reset);
 
 int
 il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
 	__be16	max_desc_sz_rq;
 	u8	rsvd21[2];
 	__be16	max_desc_sz_sq_dc;
-	u8	rsvd22[4];
-	__be16	max_qp_mcg;
-	u8	rsvd23;
+	__be32	max_qp_mcg;
+	u8	rsvd22[3];
 	u8	log_max_mcg;
-	u8	rsvd24;
+	u8	rsvd23;
 	u8	log_max_pd;
-	u8	rsvd25;
+	u8	rsvd24;
 	u8	log_max_xrcd;
-	u8	rsvd26[42];
+	u8	rsvd25[42];
 	__be16	log_uar_page_sz;
-	u8	rsvd27[28];
+	u8	rsvd26[28];
 	u8	log_msx_atomic_size_qp;
-	u8	rsvd28[2];
+	u8	rsvd27[2];
 	u8	log_msx_atomic_size_dc;
-	u8	rsvd29[76];
+	u8	rsvd28[76];
 };
 
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
 struct mlx5_eqe_page_req {
	u8		rsvd0[2];
	__be16		func_id;
-	u8		rsvd1[2];
-	__be16		num_pages;
-	__be32		rsvd2[5];
+	__be32		num_pages;
+	__be32		rsvd1[5];
 };
 
 union ev_data {

@@ -358,7 +358,7 @@ struct mlx5_caps {
	u32	reserved_lkey;
	u8	local_ca_ack_delay;
	u8	log_max_mcg;
-	u16	max_qp_mcg;
+	u32	max_qp_mcg;
	int	min_page_sz;
 };

@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
 void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s16 npages);
+				 s32 npages);
 int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
 const char *mlx5_command_str(int command);
 int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
		if (rc > 0)
			/* local bh are disabled so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
-					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 
	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
	return false;
 }
 
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
-	return false;
-}
-
 static inline void skb_mark_napi_id(struct sk_buff *skb,
				    struct napi_struct *napi)
 {
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
	return INET_ECN_encapsulate(tos, inner);
 }
 
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
-					  const struct iphdr *old_iph,
-					  struct dst_entry *dst)
-{
-	struct iphdr *iph = ip_hdr(skb);
-
-	/* Use inner packet iph-id if possible. */
-	if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-		iph->id = old_iph->id;
-	else
-		__ip_select_ident(iph, dst,
-				  (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct net *net, struct rtable *rt,
		  struct sk_buff *skb,
@@ -683,13 +683,19 @@ struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
+	u8	linklayer;
	u8	shift;
 };
 
 static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
 {
-	return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+	len += r->overhead;
+
+	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+		return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+	return ((u64)len * r->mult) >> r->shift;
 }
 
 extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
	memset(res, 0, sizeof(*res));
	res->rate = r->rate_bytes_ps;
	res->overhead = r->overhead;
+	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
 
 #endif
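The new TC_LINKLAYER_ATM branch in psched_l2t_ns() accounts for ATM framing: payload travels in 48-byte cells that each cost 53 bytes on the wire, so transmission time has to be computed from the padded cell count, not the raw packet length. A small userspace sketch of the same arithmetic, with the kernel's mult/shift reciprocal trick replaced by a plain division and a made-up 1 Mbit/s rate:

#include <stdio.h>
#include <stdint.h>

#define ATM_CELL_PAYLOAD 48   /* bytes of payload per ATM cell */
#define ATM_CELL_SIZE    53   /* bytes actually sent per cell */

static uint64_t l2t_ns(uint64_t rate_bytes_ps, unsigned int len, int atm)
{
    if (atm) {
        /* round up to whole cells; each cell costs 53 bytes on the wire */
        unsigned int cells = (len + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD;
        len = cells * ATM_CELL_SIZE;
    }
    return len * 1000000000ULL / rate_bytes_ps;
}

int main(void)
{
    /* 100-byte packet on a 1 Mbit/s (125000 B/s) link */
    printf("ethernet: %llu ns\n",
           (unsigned long long)l2t_ns(125000, 100, 0));   /* 800000 */
    /* same packet over ATM: 3 cells * 53 bytes = 159 bytes on the wire */
    printf("atm:      %llu ns\n",
           (unsigned long long)l2t_ns(125000, 100, 1));   /* 1272000 */
    return 0;
}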
@@ -73,9 +73,17 @@ struct tc_estimator {
 #define TC_H_ROOT	(0xFFFFFFFFU)
 #define TC_H_INGRESS    (0xFFFFFFF1U)
 
+/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+	TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+	TC_LINKLAYER_ETHERNET,
+	TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
 struct tc_ratespec {
	unsigned char	cell_log;
-	unsigned char	__reserved;
+	__u8		linklayer; /* lower 4 bits */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
@@ -253,7 +253,7 @@ enum
	LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,	/* TCPFastOpenListenOverflow */
	LINUX_MIB_TCPFASTOPENCOOKIEREQD,	/* TCPFastOpenCookieReqd */
	LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES,	/* TCPSpuriousRtxHostQueues */
-	LINUX_MIB_LOWLATENCYRXPACKETS,		/* LowLatencyRxPackets */
+	LINUX_MIB_BUSYPOLLRXPACKETS,		/* BusyPollRxPackets */
	__LINUX_MIB_MAX
 };
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
 
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
-	return vlan_dev_priv(dev)->real_dev;
+	struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+	while (is_vlan_dev(ret))
+		ret = vlan_dev_priv(ret)->real_dev;
+
+	return ret;
 }
 EXPORT_SYMBOL(vlan_dev_real_dev);
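The rewritten vlan_dev_real_dev() matters for vlan-on-vlan (QinQ-style) stacks: one step down no longer guarantees a real device, so the function now loops until it leaves the vlan layer entirely. A toy model of the same descent, with a made-up struct dev standing in for net_device and vlan_dev_priv():

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct dev {
    const char *name;
    struct dev *lower;   /* stand-in for vlan_dev_priv()->real_dev */
};

static bool is_vlan(const struct dev *d)
{
    return d->lower != NULL;   /* toy rule: vlan devices have a lower dev */
}

static struct dev *real_dev(struct dev *d)
{
    struct dev *ret = d->lower;

    while (is_vlan(ret))       /* keep descending through stacked vlans */
        ret = ret->lower;
    return ret;
}

int main(void)
{
    struct dev eth0  = { "eth0", NULL };
    struct dev vlan1 = { "eth0.10", &eth0 };
    struct dev vlan2 = { "eth0.10.20", &vlan1 };   /* stacked vlan */

    printf("real dev of %s is %s\n", vlan2.name, real_dev(&vlan2)->name);
    return 0;
}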
@@ -1529,6 +1529,8 @@ out:
  * in these cases, the skb is further handled by this function and
  * returns 1, otherwise it returns 0 and the caller shall further
  * process the skb.
+ *
+ * This call might reallocate skb data.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)

@@ -508,6 +508,7 @@ out:
	return 0;
 }
 
+/* this call might reallocate skb data */
 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
 {
	int ret = false;
@@ -568,6 +569,7 @@ out:
	return ret;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 {
	struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
 
	if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
		return false;
 
+	/* skb->data might have been reallocated by pskb_may_pull() */
+	ethhdr = (struct ethhdr *)skb->data;
+	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
	udphdr = (struct udphdr *)(skb->data + *header_len);
	*header_len += sizeof(*udphdr);
 
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
	return true;
 }
 
+/* this call might reallocate skb data */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr)
+			    struct sk_buff *skb)
 {
	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
	struct batadv_orig_node *orig_dst_node = NULL;
	struct batadv_gw_node *curr_gw = NULL;
+	struct ethhdr *ethhdr;
	bool ret, out_of_range = false;
	unsigned int header_len = 0;
	uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
	if (!ret)
		goto out;
 
+	ethhdr = (struct ethhdr *)skb->data;
	orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
						 ethhdr->h_dest);
	if (!orig_dst_node)

@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
-			    struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */

@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;
 
+	/* skb->data might have been reallocated by batadv_bla_tx() */
+	ethhdr = (struct ethhdr *)skb->data;
+
	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source))
		batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
		default:
			break;
		}
+
+		/* reminder: ethhdr might have become unusable from here on
+		 * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+		 */
	}
 
	/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
	/* unicast packet */
	} else {
		if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
-			ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+			ret = batadv_gw_out_of_range(bat_priv, skb);
			if (ret)
				goto dropped;
		}

@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
				       struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
  * @orig_node: the destination node
  * @packet_subtype: the batman 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
  */
 bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
				      struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
	struct batadv_neigh_node *neigh_node;
	int data_len = skb->len;
	int ret = NET_RX_DROP;
-	unsigned int dev_mtu;
+	unsigned int dev_mtu, header_len;
 
	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -429,10 +433,12 @@ find_router:
	switch (packet_type) {
	case BATADV_UNICAST:
		batadv_unicast_prepare_skb(skb, orig_node);
+		header_len = sizeof(struct batadv_unicast_packet);
		break;
	case BATADV_UNICAST_4ADDR:
		batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
						 packet_subtype);
+		header_len = sizeof(struct batadv_unicast_4addr_packet);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +447,7 @@ find_router:
		goto out;
	}
 
+	ethhdr = (struct ethhdr *)(skb->data + header_len);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
	/* inform the destination node that we are still missing a correct route
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
-	} else if (skb->len >= sizeof(*mld2q)) {
+	} else {
		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
@@ -1,5 +1,5 @@
 /*
- *	Sysfs attributes of bridge ports
+ *	Sysfs attributes of bridge
  *	Linux ethernet bridge
  *
  *	Authors:
@@ -65,6 +65,7 @@ ipv6:
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
+	case __constant_htons(ETH_P_8021AD):
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
-
-		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
-			kfree(p);
-			return NULL;
-		}
-
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;
+
+		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+			release_net(net);
+			dev_put(dev);
+			kfree(p);
+			return NULL;
+		}
+
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
	/* If aging addresses are supported device will need to
	 * implement its own handler for this.
	 */
-	if (ndm->ndm_state & NUD_PERMANENT) {
+	if (!(ndm->ndm_state & NUD_PERMANENT)) {
		pr_info("%s: FDB only supports static addresses\n", dev->name);
		return -EINVAL;
	}
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
	struct nlattr *extfilt;
	u32 filter_mask = 0;
 
-	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+	extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
				  IFLA_EXT_MASK);
	if (extfilt)
		filter_mask = nla_get_u32(extfilt);
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
	}
 
	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
@@ -71,7 +71,6 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/prefetch.h>
 #include <linux/export.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
		if (!c)
			continue;
 
-		if (IS_LEAF(c)) {
-			prefetch(rcu_dereference_rtnl(p->child[idx]));
+		if (IS_LEAF(c))
			return (struct leaf *) c;
-		}
 
		/* Rescan start scanning in new node */
		p = (struct tnode *) c;
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
-		return t->hlen;
+		return t->hlen + sizeof(*iph);
 
	return -(t->hlen + sizeof(*iph));
 }
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
	iph->daddr	=	dst;
	iph->saddr	=	src;
	iph->ttl	=	ttl;
-	tunnel_ip_select_ident(skb,
-			       (const struct iphdr *)skb_inner_network_header(skb),
-			       &rt->dst);
+	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 
	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
	SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
	SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
	SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
-	SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+	SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
	SNMP_MIB_SENTINEL
 };
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
  */
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
-	u64 offs;
-	u32 delta, t, bic_target, max_cnt;
+	u32 delta, bic_target, max_cnt;
+	u64 offs, t;
 
	ca->ack_cnt++;	/* count the number of ACKs */
 
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
	 * if the cwnd < 1 million packets !!!
	 */
 
-	/* change the unit from HZ to bictcp_HZ */
-	t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
-	      - ca->epoch_start) << BICTCP_HZ) / HZ;
+	t = (s32)(tcp_time_stamp - ca->epoch_start);
+	t += msecs_to_jiffies(ca->delay_min >> 3);
+	/* change the unit from HZ to bictcp_HZ */
+	t <<= BICTCP_HZ;
+	do_div(t, HZ);
 
	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
		return;
 
	/* Discard delay samples right after fast recovery */
-	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+	if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
		return;
 
	delay = (rtt_us << 3) / USEC_PER_MSEC;
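The bictcp_update() change is an overflow fix: t used to be a u32, and shifting the elapsed jiffies left by BICTCP_HZ (10) wraps once roughly 2^22 jiffies have passed since epoch_start. Widening t to 64 bits before the shift (do_div in the kernel) keeps the value exact. A userspace illustration, with an assumed HZ of 1000 and a made-up elapsed time:

#include <stdio.h>
#include <stdint.h>

#define BICTCP_HZ 10     /* BIC time unit: 2^10 ticks per second */
#define HZ        1000   /* assumed jiffies rate for this example */

int main(void)
{
    /* about 2 hours of jiffies since the epoch started */
    uint32_t elapsed = 2u * 60 * 60 * HZ;   /* 7200000 */

    /* old style: the 32-bit shift wraps once elapsed exceeds 2^22 */
    uint32_t t32 = (elapsed << BICTCP_HZ) / HZ;

    /* new style: widen to 64 bits before shifting */
    uint64_t t64 = ((uint64_t)elapsed << BICTCP_HZ) / HZ;

    printf("32-bit: %u\n", t32);                        /* wrapped, bogus */
    printf("64-bit: %llu\n", (unsigned long long)t64);  /* 7372800 */
    return 0;
}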
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
		net_adj = 0;
 
	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
-		 net_adj) & ~(align - 1)) + (net_adj - 2);
+		 net_adj) & ~(align - 1)) + net_adj - 2;
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 
			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
 #ifdef CONFIG_IPV6_SUBTREES
-				if (fn->subtree)
-					fn = fib6_lookup_1(fn->subtree, args + 1);
+				if (fn->subtree) {
+					struct fib6_node *sfn;
+					sfn = fib6_lookup_1(fn->subtree,
+							    args + 1);
+					if (!sfn)
+						goto backtrack;
+					fn = sfn;
+				}
 #endif
-				if (!fn || fn->fn_flags & RTN_RTINFO)
+				if (fn->fn_flags & RTN_RTINFO)
					return fn;
			}
		}
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
		if (fn->fn_flags & RTN_ROOT)
			break;
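The fib6_lookup_1() fix changes control flow: when a matching node has a subtree but the subtree lookup fails, the old code effectively gave up, while the new code backtracks and keeps trying less specific nodes. A toy lookup over a made-up node structure (not the real fib6 trie, which also matches prefixes along the way) showing just that control-flow difference:

#include <stdio.h>
#include <stddef.h>

struct node {
    const char *name;
    struct node *parent;
    struct node *subtree;   /* optional second-level tree */
    int has_route;          /* stand-in for RTN_RTINFO */
};

static struct node *lookup(struct node *fn)
{
    while (fn) {
        if (fn->subtree) {
            struct node *sfn = lookup(fn->subtree);
            if (!sfn)
                goto backtrack;   /* pre-fix code stopped here with NULL */
            fn = sfn;
        }
        if (fn->has_route)
            return fn;
backtrack:
        fn = fn->parent;          /* try a less specific ancestor */
    }
    return NULL;
}

int main(void)
{
    struct node root = { "default-route", NULL, NULL, 1 };
    struct node sub  = { "subtree-leaf", NULL, NULL, 0 };   /* no route */
    struct node leaf = { "specific-prefix", &root, &sub, 0 };

    struct node *hit = lookup(&leaf);
    printf("matched: %s\n", hit ? hit->name : "(none)");   /* default-route */
    return 0;
}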
@@ -31,10 +31,12 @@
 #include "led.h"
 
 #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_AUTH_MAX_TRIES	3
 #define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG	(HZ / 2)
 #define IEEE80211_ASSOC_TIMEOUT_SHORT	(HZ / 10)
 #define IEEE80211_ASSOC_MAX_TRIES	3
 
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_channel *channel,
			     const struct ieee80211_ht_operation *ht_oper,
			     const struct ieee80211_vht_operation *vht_oper,
-			     struct cfg80211_chan_def *chandef, bool verbose)
+			     struct cfg80211_chan_def *chandef, bool tracking)
 {
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	struct cfg80211_chan_def vht_chandef;
	u32 ht_cfreq, ret;
 
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
	ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
						  channel->band);
	/* check that channel matches the right operating channel */
-	if (channel->center_freq != ht_cfreq) {
+	if (!tracking && channel->center_freq != ht_cfreq) {
		/*
		 * It's possible that some APs are confused here;
		 * Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
		 * since we look at probe response/beacon data here
		 * it should be OK.
		 */
-		if (verbose)
-			sdata_info(sdata,
-				   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
-				   channel->center_freq, ht_cfreq,
-				   ht_oper->primary_chan, channel->band);
+		sdata_info(sdata,
+			   "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+			   channel->center_freq, ht_cfreq,
+			   ht_oper->primary_chan, channel->band);
		ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
		goto out;
	}
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
						channel->band);
		break;
	default:
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
			sdata_info(sdata,
				   "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
				   vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
	}
 
	if (!cfg80211_chandef_valid(&vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
			sdata_info(sdata,
				   "AP VHT information is invalid, disable VHT\n");
		ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
	}
 
	if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
-		if (verbose)
+		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
			sdata_info(sdata,
				   "AP VHT information doesn't match HT, disable VHT\n");
		ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
	if (ret & IEEE80211_STA_DISABLE_VHT)
		vht_chandef = *chandef;
 
+	/*
+	 * Ignore the DISABLED flag when we're already connected and only
+	 * tracking the APs beacon for bandwidth changes - otherwise we
+	 * might get disconnected here if we connect to an AP, update our
+	 * regulatory information based on the AP's country IE and the
+	 * information we have is wrong/outdated and disables the channel
+	 * that we're actually using for the connection to the AP.
+	 */
	while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
-					IEEE80211_CHAN_DISABLED)) {
+					tracking ? 0 :
+						   IEEE80211_CHAN_DISABLED)) {
		if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
			ret = IEEE80211_STA_DISABLE_HT |
			      IEEE80211_STA_DISABLE_VHT;
-			goto out;
+			break;
		}
 
		ret |= chandef_downgrade(chandef);
	}
 
-	if (chandef->width != vht_chandef.width && verbose)
+	if (chandef->width != vht_chandef.width && !tracking)
		sdata_info(sdata,
			   "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
 
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
 
	/* calculate new channel (type) based on HT/VHT operation IEs */
	flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
-					     vht_oper, &chandef, false);
+					     vht_oper, &chandef, true);
 
	/*
	 * Downgrade the new channel if we associated with restricted
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
 
	if (tx_flags == 0) {
		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-		ifmgd->auth_data->timeout_started = true;
+		auth_data->timeout_started = true;
		run_again(sdata, auth_data->timeout);
	} else {
-		auth_data->timeout_started = false;
+		auth_data->timeout =
+			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+		auth_data->timeout_started = true;
+		run_again(sdata, auth_data->timeout);
	}
 
	return 0;
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
		assoc_data->timeout_started = true;
		run_again(sdata, assoc_data->timeout);
	} else {
-		assoc_data->timeout_started = false;
+		assoc_data->timeout =
+			round_jiffies_up(jiffies +
+					 IEEE80211_ASSOC_TIMEOUT_LONG);
+		assoc_data->timeout_started = true;
+		run_again(sdata, assoc_data->timeout);
	}
 
	return 0;
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
	ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
						     cbss->channel,
						     ht_oper, vht_oper,
-						     &chandef, true);
+						     &chandef, false);
 
	sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
				      local->rx_chains);
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	__u32 seq, ack, sack, end, win, swin;
	s16 receiver_offset;
-	bool res;
+	bool res, in_recv_win;
 
	/*
	 * Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
		 receiver->td_scale);
 
+	/* Is the ending sequence in the receive window (if available)? */
+	in_recv_win = !receiver->td_maxwin ||
+		      after(end, sender->td_end - receiver->td_maxwin - 1);
+
	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
		 before(seq, sender->td_maxend + 1),
-		 after(end, sender->td_end - receiver->td_maxwin - 1),
+		 (in_recv_win ? 1 : 0),
		 before(sack, receiver->td_end + 1),
		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
 
	if (before(seq, sender->td_maxend + 1) &&
-	    after(end, sender->td_end - receiver->td_maxwin - 1) &&
+	    in_recv_win &&
	    before(sack, receiver->td_end + 1) &&
	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
		/*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
			"nf_ct_tcp: %s ",
			before(seq, sender->td_maxend + 1) ?
-			after(end, sender->td_end - receiver->td_maxwin - 1) ?
+			in_recv_win ?
			before(sack, receiver->td_end + 1) ?
			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
			: "ACK is under the lower bound (possible overly delayed ACK)"
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);
 
+	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;
 
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
-		int len = dev_parse_header(skb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
	if (indev && entskb->dev &&
	    entskb->mac_header != entskb->network_header) {
		struct nfqnl_msg_packet_hw phw;
-		int len = dev_parse_header(entskb, phw.hw_addr);
+		int len;
+
+		memset(&phw, 0, sizeof(phw));
+		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
 {
	const struct xt_tcpmss_info *info = par->targinfo;
	struct tcphdr *tcph;
-	unsigned int tcplen, i;
+	int len, tcp_hdrlen;
+	unsigned int i;
	__be16 oldval;
	u16 newmss;
	u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
	if (!skb_make_writable(skb, skb->len))
		return -1;
 
-	tcplen = skb->len - tcphoff;
-	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	len = skb->len - tcphoff;
+	if (len < (int)sizeof(struct tcphdr))
+		return -1;
 
-	/* Header cannot be larger than the packet */
-	if (tcplen < tcph->doff*4)
+	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	tcp_hdrlen = tcph->doff * 4;
+
+	if (len < tcp_hdrlen)
		return -1;
 
	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
		newmss = info->mss;
 
	opt = (u_int8_t *)tcph;
-	for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
-		if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
-		    opt[i+1] == TCPOLEN_MSS) {
+	for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+		if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
			u_int16_t oldmss;
 
			oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
	}
 
	/* There is data after the header so the option can't be added
-	   without moving it, and doing so may make the SYN packet
-	   itself too large. Accept the packet unmodified instead. */
-	if (tcplen > tcph->doff*4)
+	 * without moving it, and doing so may make the SYN packet
+	 * itself too large. Accept the packet unmodified instead.
+	 */
+	if (len > tcp_hdrlen)
		return 0;
 
	/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
	newmss = min(newmss, (u16)1220);
 
	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
-	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
	inet_proto_csum_replace2(&tcph->check, skb,
-				 htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+				 htons(len), htons(len + TCPOLEN_MSS), 1);
	opt[0] = TCPOPT_MSS;
	opt[1] = TCPOLEN_MSS;
	opt[2] = (newmss & 0xff00) >> 8;
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
	struct tcphdr *tcph;
	u_int16_t n, o;
	u_int8_t *opt;
-	int len;
+	int len, tcp_hdrlen;
 
	/* This is a fragment, no TCP header is available */
	if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
		return NF_DROP;
 
	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
-	if (tcph->doff * 4 > len)
+	tcp_hdrlen = tcph->doff * 4;
+
+	if (len < tcp_hdrlen)
		return NF_DROP;
 
	opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
	 * Walk through all TCP options - if we find some option to remove,
	 * set all octets to %TCPOPT_NOP and adjust checksum.
	 */
-	for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+	for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
		optl = optlen(opt, i);
 
-		if (i + optl > tcp_hdrlen(skb))
+		if (i + optl > tcp_hdrlen)
			break;
 
		if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
	struct net *net = sock_net(skb->sk);
	int chains_to_skip = cb->args[0];
	int fams_to_skip = cb->args[1];
+	bool need_locking = chains_to_skip || fams_to_skip;
+
+	if (need_locking)
+		genl_lock();
 
	for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
		n = 0;
@@ -810,6 +814,9 @@ errout:
	cb->args[0] = i;
	cb->args[1] = n;
 
+	if (need_locking)
+		genl_unlock();
+
	return skb->len;
 }
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
 {
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
+	OVS_CB(skb)->tun_key = NULL;
	return do_execute_actions(dp, skb, acts->actions,
					 acts->actions_len, false);
 }

@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
	return 0;
 
-	rtnl_unlock();
-	return 0;
-
 exit_free:
	kfree_skb(reply);
 exit_unlock:

@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
	struct flex_array *buckets;
	int i, err;
 
-	buckets = flex_array_alloc(sizeof(struct hlist_head *),
+	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
	return q;
 }
 
+/* The linklayer setting were not transferred from iproute2, in older
+ * versions, and the rate tables lookup systems have been dropped in
+ * the kernel. To keep backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by detecting if the rate
+ * table were modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value. The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find low and high table entries for
+ * mapping this cell. If these entries contain the same value, when
+ * the rate tables have been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu to the nearest 48 bytes cell/entry,
+ * and then roundup to the next cell, calc the table entry one below,
+ * and compare.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+	int low       = roundup(r->mpu, 48);
+	int high      = roundup(low+1, 48);
+	int cell_low  = low >> r->cell_log;
+	int cell_high = (high >> r->cell_log) - 1;
+
+	/* rtab is too inaccurate at rates > 100Mbit/s */
+	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+		pr_debug("TC linklayer: Giving up ATM detection\n");
+		return TC_LINKLAYER_ETHERNET;
+	}
+
+	if ((cell_high > cell_low) && (cell_high < 256)
+	    && (rtab[cell_low] == rtab[cell_high])) {
+		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+			 cell_low, cell_high, rtab[cell_high]);
+		return TC_LINKLAYER_ATM;
+	}
+	return TC_LINKLAYER_ETHERNET;
+}
+
 static struct qdisc_rate_table *qdisc_rtab_list;
 
 struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
+		if (r->linklayer == TC_LINKLAYER_UNAWARE)
+			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
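The heuristic in __detect_linklayer() can be reproduced outside the kernel: build a rate table the way an ATM-aware iproute2 would (rounding each packet size up to whole 53-byte cells) and check whether two neighbouring buckets share a value. The rate, mpu and cell_log below are sample values, not taken from real traffic:

#include <stdio.h>
#include <stdint.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    uint32_t rtab[256];
    unsigned int rate = 1000000 / 8;   /* 1 Mbit/s in bytes per second */
    int cell_log = 3;                  /* each bucket covers 8 bytes */
    int mpu = 0, i;

    /* transmission time (usec) per size bucket, ATM style */
    for (i = 0; i < 256; i++) {
        unsigned int sz = (i + 1) << cell_log;
        unsigned int wire = ROUNDUP(sz, 48) / 48 * 53;
        rtab[i] = (uint64_t)wire * 1000000 / rate;
    }

    int low       = ROUNDUP(mpu, 48);
    int high      = ROUNDUP(low + 1, 48);
    int cell_low  = low >> cell_log;
    int cell_high = (high >> cell_log) - 1;

    printf("rtab[%d]=%u rtab[%d]=%u -> %s\n",
           cell_low, rtab[cell_low], cell_high, rtab[cell_high],
           (cell_high > cell_low && cell_high < 256 &&
            rtab[cell_low] == rtab[cell_high]) ? "ATM" : "ETHERNET");
    return 0;
}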
@@ -25,6 +25,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
 
 unsigned long dev_trans_start(struct net_device *dev)
 {
-	unsigned long val, res = dev->trans_start;
+	unsigned long val, res;
	unsigned int i;
 
+	if (is_vlan_dev(dev))
+		dev = vlan_dev_real_dev(dev);
+	res = dev->trans_start;
	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
+	dev->trans_start = res;
 
	return res;
 }
 EXPORT_SYMBOL(dev_trans_start);
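dev_trans_start() compares per-queue timestamps with time_after(), which stays correct across jiffies wraparound because it compares via a signed difference rather than a plain greater-than. A small demonstration with values straddling the wrap point of an unsigned long counter:

#include <stdio.h>

/* same shape as the kernel macro: true if a is later than b */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long before_wrap = (unsigned long)-16;  /* 16 ticks before wrap */
    unsigned long after_wrap  = 16;                  /* 16 ticks after wrap */

    printf("plain >   : %d\n", after_wrap > before_wrap);            /* 0, wrong */
    printf("time_after: %d\n", time_after(after_wrap, before_wrap)); /* 1 */
    return 0;
}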
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->rate_bytes_ps = conf->rate;
+	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	r->mult = 1;
	/*
	 * The deal here is to replace a divide by a reciprocal one
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
+	struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_opt *hopt;
 
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;
 
+	/* Keeping backward compatible with rate_table based iproute2 tc */
+	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+		rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+		if (rtab)
+			qdisc_put_rtab(rtab);
+	}
+	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+		ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+		if (ctab)
+			qdisc_put_rtab(ctab);
+	}
+
	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
-		 * active state and set cwnd to 1, see SCTP
+		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
-			transport->cwnd = 1;
+			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
		return;
	}
 
-	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
	sctp_packet_free(&transport->packet);
 
	if (transport->asoc)
		sctp_association_put(transport->asoc);
+
+	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
 }
 
 /* Start T3_rtx timer if it is not already running and update the heartbeat
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 {
	struct tipc_link *l_ptr;
	struct tipc_link *temp_l_ptr;
+	struct tipc_link_req *temp_req;
 
	pr_info("Disabling bearer <%s>\n", b_ptr->name);
	spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		tipc_link_delete(l_ptr);
	}
-	if (b_ptr->link_req)
-		tipc_disc_delete(b_ptr->link_req);
+	temp_req = b_ptr->link_req;
+	b_ptr->link_req = NULL;
	spin_unlock_bh(&b_ptr->lock);
 
+	if (temp_req)
+		tipc_disc_delete(temp_req);
+
	memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
-				    connected_table);
+				    connected_table)
			fn(sk_vsock(vsk));
	}
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
		cfg80211_leave_mesh(rdev, dev);
		break;
	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
		cfg80211_stop_ap(rdev, dev);
		break;
	default:
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
			goto out_unlock;
		}
		*rdev = wiphy_to_dev((*wdev)->wiphy);
-		cb->args[0] = (*rdev)->wiphy_idx;
+		/* 0 is the first index - add 1 to parse only once */
+		cb->args[0] = (*rdev)->wiphy_idx + 1;
		cb->args[1] = (*wdev)->identifier;
	} else {
-		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+		/* subtract the 1 again here */
+		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
		struct wireless_dev *tmp;
 
		if (!wiphy) {