Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) nfnetlink timestamp taken from wrong skb, fix from Florian Westphal.

 2) Revert some msleep conversions in rtlwifi as these spots are in
    atomic context, from Larry Finger.

 3) Validate that NFTA_SET_TABLE attribute is actually specified when we
    call nf_tables_getset().  From Phil Turnbull.

 4) Don't do mdio_reset in stmmac driver with spinlock held as that can
    sleep, from Vincent Palatin.

 5) sk_filter() does things other than run a BPF filter, so we should
    not elide its call just because sk->sk_filter is NULL.  Fix from
    Eric Dumazet (a sketch of the corrected ordering follows this list).

 6) Fix missing backlog updates in several packet schedulers, from Cong
    Wang.

 7) bnx2x driver should allow VLAN add/remove while the interface is
    down, from Michal Schmidt.

 8) Several RDS/TCP race fixes from Sowmini Varadhan.

 9) fq_codel scheduler doesn't return correct queue length in dumps,
    from Eric Dumazet.

10) Fix TCP stats for tail loss probe and early retransmit in ipv6, from
    Yuchung Cheng.

11) Properly initialize udp_tunnel_socket_cfg in l2tp_tunnel_create(),
    from Guillaume Nault.

12) qfq scheduler leaks SKBs if a kzalloc fails, fix from Florian
    Westphal.

13) sock_fprog passed into PACKET_FANOUT_DATA needs compat handling,
    from Willem de Bruijn.
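
A minimal sketch of the corrected ordering from item 5, assuming the usual
shape of the UDP receive path (the actual hunks for net/ipv4/udp.c and
net/ipv6/udp.c appear further down in this diff):

	/* Only force checksum verification when a socket filter might
	 * look at the payload ... */
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	/* ... but always run sk_filter() itself, since it does more than
	 * evaluate sk->sk_filter. */
	if (sk_filter(sk, skb))
		goto drop;

	udp_csum_pull_header(skb);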

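For item 13, the compat path converts the 32-bit sock_fprog layout to the
native one before the fanout filter is attached.  A simplified sketch that
mirrors the helper added in net/compat.c (its hunk appears below); error
handling and the PACKET_FANOUT_DATA caller are omitted:

	struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
	{
		struct compat_sock_fprog __user *fprog32 =
			(struct compat_sock_fprog __user *)optval;
		struct sock_fprog __user *kfprog =
			compat_alloc_user_space(sizeof(struct sock_fprog));
		compat_uptr_t ptr;
		u16 len;

		if (__get_user(len, &fprog32->len) ||
		    __get_user(ptr, &fprog32->filter) ||
		    __put_user(len, &kfprog->len) ||
		    __put_user(compat_ptr(ptr), &kfprog->filter))
			return NULL;

		return kfprog;
	}
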
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (85 commits)
  vmxnet3: segCnt can be 1 for LRO packets
  packet: compat support for sock_fprog
  stmmac: fix parameter to dwmac4_set_umac_addr()
  net/mlx5e: Fix blue flame quota logic
  net/mlx5e: Use ndo_stop explicitly at shutdown flow
  net/mlx5: E-Switch, always set mc_promisc for allmulti vports
  net/mlx5: E-Switch, Modify node guid on vf set MAC
  net/mlx5: E-Switch, Fix vport enable flow
  net/mlx5: E-Switch, Use the correct error check on returned pointers
  net/mlx5: E-Switch, Use the correct free() function
  net/mlx5: Fix E-Switch flow steering capabilities check
  net/mlx5: Fix flow steering NIC capabilities check
  net/mlx5: Fix root flow table update
  net/mlx5: Fix MLX5_CMD_OP_MAX to be defined correctly
  net/mlx5: Fix masking of reserved bits in XRCD number
  net/mlx5: Fix the size of modify QP mailbox
  mlxsw: spectrum: Don't sleep during ndo_get_phys_port_name()
  mlxsw: spectrum: Make split flow match firmware requirements
  wext: Fix 32 bit iwpriv compatibility issue with 64 bit Kernel
  cfg80211: remove get/set antenna and tx power warnings
  ...
Linus Torvalds 2016-06-10 08:32:24 -07:00
commit 698ea54dde
93 changed files with 830 additions and 462 deletions

View File

@ -8009,6 +8009,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
S: Maintained
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT

View File

@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
if (!bp->vlan_cnt) {
DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
return 0;
}
/* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
/* Prepare for cleanup in case of errors */
if (rc) {
vlan->hw = false;
continue;
}
if (!vlan->hw)
if (vlan->hw)
continue;
DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
if (bp->vlan_cnt >= bp->vlan_credit)
return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
vlan->hw = false;
rc = -EINVAL;
continue;
BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
return rc;
}
DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
vlan->hw = true;
bp->vlan_cnt++;
}
return rc;
return 0;
}
static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
{
bool need_accept_any_vlan;
need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
if (bp->accept_any_vlan != need_accept_any_vlan) {
bp->accept_any_vlan = need_accept_any_vlan;
DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
bp->accept_any_vlan ? "raised" : "cleared");
if (set_rx_mode) {
if (IS_PF(bp))
bnx2x_set_rx_mode_inner(bp);
else
bnx2x_vfpf_storm_rx_mode(bp);
}
}
}
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
/* The hw forgot all entries after reload */
list_for_each_entry(vlan, &bp->vlan_reg, link)
vlan->hw = false;
bp->vlan_cnt = 0;
/* Don't set rx mode here. Our caller will do it. */
bnx2x_vlan_configure(bp, false);
return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
bool hw = false;
int rc = 0;
if (!netif_running(bp->dev)) {
DP(NETIF_MSG_IFUP,
"Ignoring VLAN configuration the interface is down\n");
return -EFAULT;
}
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
bp->vlan_cnt++;
if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
bp->accept_any_vlan = true;
if (IS_PF(bp))
bnx2x_set_rx_mode_inner(bp);
else
bnx2x_vfpf_storm_rx_mode(bp);
} else if (bp->vlan_cnt <= bp->vlan_credit) {
rc = __bnx2x_vlan_configure_vid(bp, vid, true);
hw = true;
}
vlan->vid = vid;
vlan->hw = hw;
vlan->hw = false;
list_add_tail(&vlan->link, &bp->vlan_reg);
if (!rc) {
list_add(&vlan->link, &bp->vlan_reg);
} else {
bp->vlan_cnt--;
kfree(vlan);
}
if (netif_running(dev))
bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
return rc;
return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
bool found = false;
int rc = 0;
if (!netif_running(bp->dev)) {
DP(NETIF_MSG_IFUP,
"Ignoring VLAN configuration the interface is down\n");
return -EFAULT;
}
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
if (!bp->vlan_cnt) {
BNX2X_ERR("Unable to kill VLAN %d\n", vid);
return -EINVAL;
}
list_for_each_entry(vlan, &bp->vlan_reg, link)
if (vlan->vid == vid)
if (vlan->vid == vid) {
found = true;
break;
}
if (vlan->vid != vid) {
if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
if (vlan->hw)
if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
bp->vlan_cnt--;
}
list_del(&vlan->link);
kfree(vlan);
bp->vlan_cnt--;
if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
/* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
if (vlan->hw)
continue;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
BNX2X_ERR("Unable to config VLAN %d\n",
vlan->vid);
continue;
}
DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
vlan->vid);
vlan->hw = true;
}
DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
bp->accept_any_vlan = false;
if (IS_PF(bp))
bnx2x_set_rx_mode_inner(bp);
else
bnx2x_vfpf_storm_rx_mode(bp);
}
if (netif_running(dev))
bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);

View File

@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len);
}
tx_buf->is_push = 1;
goto tx_done;
}
@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
netdev_features_t features = skb->dev->features;
if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
(skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
vlan_proto == ETH_P_8021Q) ||
((features & NETIF_F_HW_VLAN_STAG_RX) &&
vlan_proto == ETH_P_8021AD)) {
__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
tpa_info->metadata &
RX_CMP_FLAGS2_METADATA_VID_MASK);
}
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
if (rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
netdev_features_t features = skb->dev->features;
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
(skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
vlan_proto == ETH_P_8021Q) ||
((features & NETIF_F_HW_VLAN_STAG_RX) &&
vlan_proto == ETH_P_8021AD))
__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
meta_data &
RX_CMP_FLAGS2_METADATA_VID_MASK);
__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
/* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
else
features |= NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX;
}
return features;
}

View File

@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */

View File

@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
goto free;
goto free2;
}
priv->mdio->name = "ethoc-mdio";
@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
goto free;
goto free2;
}
ret = ethoc_mdio_probe(netdev);
@ -1241,9 +1241,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
free:
free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
free:
free_netdev(netdev);
out:
return ret;

View File

@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
pr_err("Rx coalesced frames exceed hardware limiation");
pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
pr_err("Tx coalesced frame exceed hardware limiation");
pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
pr_err("Rx coalesed usec exceeed hardware limiation");
pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
pr_err("Rx coalesed usec exceeed hardware limiation");
pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}

View File

@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
if (likely(!do_tstamp))
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;

View File

@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
u16 lcl_adv = 0, rmt_adv = 0;
u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
if (mac->phy_dev->duplex)
if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
if (mac->phy_dev->pause)
mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
if (mac->phy_dev->pause)
rmt_adv = LPA_PAUSE_CAP;
if (mac->phy_dev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
if (mac->phy_dev->advertising & ADVERTISED_Pause)
lcl_adv |= ADVERTISE_PAUSE_CAP;
if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
mcr |= MAC_MCR_FORCE_TX_FC;
if (flowctrl & FLOW_CTRL_RX)
mcr |= MAC_MCR_FORCE_RX_FC;
netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
}
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
if (!np && of_phy_is_fixed_link(mac->of_node))
if (!of_phy_register_fixed_link(mac->of_node))
np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
mac->phy_dev->supported &= PHY_BASIC_FEATURES;
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
return 0;
err_free_bus:
kfree(eth->mii_bus);
mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
kfree(eth->mii_bus);
mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)

View File

@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}

View File

@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
if (bf)
sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;

View File

@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR_OR_NULL(flow_rule)) {
if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
if (IS_ERR_OR_NULL(fdb)) {
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
eth_zero_addr(dmac);
dmac[0] = 0x01;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
goto out;
@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
goto out;
@ -529,7 +529,7 @@ out:
}
}
kfree(flow_group_in);
kvfree(flow_group_in);
return err;
}
@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
esw_fdb_set_vport_rule(esw,
mac,
vport_idx);
iter_vaddr->mc_promisc = true;
break;
case MLX5_ACTION_DEL:
if (!iter_vaddr)
@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
vport->vport, err);
@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(vlan_grp)) {
if (IS_ERR(vlan_grp)) {
err = PTR_ERR(vlan_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
vport->vport, err);
@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
drop_grp = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(drop_grp)) {
if (IS_ERR(drop_grp)) {
err = PTR_ERR(drop_grp);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
vport->vport, err);
@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.drop_grp = drop_grp;
vport->egress.allowed_vlans_grp = vlan_grp;
out:
kfree(flow_group_in);
kvfree(flow_group_in);
if (err && !IS_ERR_OR_NULL(vlan_grp))
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
return;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
vport->vport, err);
@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
vport->vport, err);
@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
vport->vport, err);
@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
vport->vport, err);
@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR_OR_NULL(g)) {
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
vport->vport, err);
@ -1259,7 +1260,7 @@ out:
mlx5_destroy_flow_table(vport->ingress.acl);
}
kfree(flow_group_in);
kvfree(flow_group_in);
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
vport->vport, err);
@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
vport->vport, err);
@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL);
if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
vport->vport, err);
@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
match_v,
MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL);
if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
vport->vport, err);
@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
esw_vport_change_handle_locked(vport);
vport->enabled = true;
/* only PF is trusted by default */
vport->trusted = (vport_num) ? false : true;
arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
((u8 *)node_guid)[7] = mac[0];
((u8 *)node_guid)[6] = mac[1];
((u8 *)node_guid)[5] = mac[2];
((u8 *)node_guid)[4] = 0xff;
((u8 *)node_guid)[3] = 0xfe;
((u8 *)node_guid)[2] = mac[3];
((u8 *)node_guid)[1] = mac[4];
((u8 *)node_guid)[0] = mac[5];
}
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
int err = 0;
struct mlx5_vport *evport;
u64 node_guid;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
node_guid_gen_from_mac(&node_guid, mac);
err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
if (err)
mlx5_core_warn(esw->dev,
"Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
vport, err);
mutex_lock(&esw->state_lock);
if (evport->enabled)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
return err;
}

View File

@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
root->root_ft = new_root_ft;
}
root->root_ft = new_root_ft;
return 0;
}
@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
{
int err = 0;
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return 0;
err = mlx5_init_fc_stats(dev);
if (err)
return err;
if (MLX5_CAP_GEN(dev, nic_flow_table)) {
if (MLX5_CAP_GEN(dev, nic_flow_table) &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
err = init_fdb_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
err = init_egress_acl_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
err = init_ingress_acl_root_ns(dev);
if (err)
goto err;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
err = init_fdb_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
err = init_egress_acl_root_ns(dev);
if (err)
goto err;
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
err = init_ingress_acl_root_ns(dev);
if (err)
goto err;
}
}
return 0;

View File

@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
*xrcdn = be32_to_cpu(out.xrcdn);
*xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}

View File

@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid)
{
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
void *nic_vport_context;
u8 *guid;
void *in;
int err;
if (!vport)
return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
return -ENOTSUPP;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
MLX5_SET(modify_nic_vport_context_in, in,
field_select.node_guid, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
node_guid);
MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
kvfree(in);
return err;
}
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{

View File

@ -247,13 +247,21 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 swid)
{
char pspa_pl[MLXSW_REG_PSPA_LEN];
mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
swid);
}
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
u8 *p_width, u8 *p_lane)
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return 0;
}
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
u8 *p_width)
{
u8 lane;
return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
p_width, &lane);
}
static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 module, u8 width, u8 lane)
{
@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
size_t len)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
u8 module, width, lane;
u8 module = mlxsw_sp_port->mapping.module;
u8 width = mlxsw_sp_port->mapping.width;
u8 lane = mlxsw_sp_port->mapping.lane;
int err;
err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_port->local_port,
&module, &width, &lane);
if (err) {
netdev_err(dev, "Failed to retrieve module information\n");
return err;
}
if (!mlxsw_sp_port->split)
err = snprintf(name, len, "p%d", module + 1);
else
@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width)
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *dev;
@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc:
return err;
}
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
int err;
err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
lane);
if (err)
return err;
err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
width);
if (err)
goto err_port_create;
return 0;
err_port_create:
mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
return err;
}
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
u8 module, width, lane;
size_t alloc_size;
u8 module, width;
int i;
int err;
@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width);
&width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
lane);
if (err)
goto err_port_create;
}
@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
u8 module, unsigned int count)
{
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
int err, i;
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
width, i * width);
if (err)
goto err_port_module_map;
}
for (i = 0; i < count; i++) {
err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
if (err)
goto err_port_swid_set;
}
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
module, width, i * width);
if (err)
goto err_port_create;
}
return 0;
err_port_create:
for (i--; i >= 0; i--)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
i = count;
err_port_swid_set:
for (i--; i >= 0; i--)
__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
MLXSW_PORT_SWID_DISABLED_PORT);
i = count;
err_port_module_map:
for (i--; i >= 0; i--)
mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
u8 base_port, unsigned int count)
{
u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
int i;
/* Split by four means we need to re-create two ports, otherwise
* only one.
*/
count = count / 2;
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
0);
}
for (i = 0; i < count; i++)
__mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
width, 0);
}
}
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
unsigned int count)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
u8 module, cur_width, base_port;
int i;
int err;
@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
module = mlxsw_sp_port->mapping.module;
cur_width = mlxsw_sp_port->mapping.width;
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
&cur_width);
if (err) {
netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
return err;
}
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
for (i = 0; i < count; i++) {
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
module, width, i * width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
goto err_port_create;
}
err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
}
return 0;
err_port_create:
for (i--; i >= 0; i--)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
for (i = 0; i < count / 2; i++) {
module = mlxsw_sp->port_to_module[base_port + i * 2];
mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
}
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
u8 cur_width, base_port;
unsigned int count;
int i;
int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
return -EINVAL;
}
err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
&cur_width);
if (err) {
netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
return err;
}
cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
for (i = 0; i < count / 2; i++) {
module = mlxsw_sp->port_to_module[base_port + i * 2];
err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
module, MLXSW_PORT_MODULE_MAX_WIDTH,
0);
if (err)
dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
}
mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}

View File

@ -229,6 +229,11 @@ struct mlxsw_sp_port {
struct ieee_maxrate *maxrate;
struct ieee_pfc *pfc;
} dcb;
struct {
u8 module;
u8 width;
u8 lane;
} mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;

View File

@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
struct qed_mcp_link_params *params,
struct qed_mcp_link_state *link,
struct qed_mcp_link_capabilities *link_caps)
{
void *p;
if (!IS_PF(hwfn->cdev)) {
qed_vf_get_link_params(hwfn, params);
qed_vf_get_link_state(hwfn, link);
qed_vf_get_link_caps(hwfn, link_caps);
return 0;
}
p = qed_mcp_get_link_params(hwfn);
if (!p)
return -ENXIO;
memcpy(params, p, sizeof(*params));
p = qed_mcp_get_link_state(hwfn);
if (!p)
return -ENXIO;
memcpy(link, p, sizeof(*link));
p = qed_mcp_get_link_capabilities(hwfn);
if (!p)
return -ENXIO;
memcpy(link_caps, p, sizeof(*link_caps));
return 0;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
if (IS_PF(hwfn->cdev)) {
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(link_caps));
} else {
qed_vf_get_link_params(hwfn, &params);
qed_vf_get_link_state(hwfn, &link);
qed_vf_get_link_caps(hwfn, &link_caps);
if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
return;
}
/* Set the link parameters to pass to protocol driver */

View File

@ -12,11 +12,13 @@
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev) (0)
#define IS_PF(cdev) (1)
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))

View File

@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
{ 0 }
};

View File

@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
result |= SUPPORTED_FIBRE;
break;
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
result |= SUPPORTED_1000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;

View File

@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, dev) {
dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
dwmac4_set_umac_addr(hw, ha->addr, reg);
reg++;
}
}

View File

@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
if (!netif_running(ndev))
return 0;
spin_lock_irqsave(&priv->lock, flags);
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
spin_lock_irqsave(&priv->lock, flags);
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;

View File

@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}

View File

@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
BUG_ON(segCnt <= 1);
WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;

View File

@ -69,10 +69,10 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */

View File

@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
&ifp->vif->sme_state)) {
memset(&scb_val, 0, sizeof(scb_val));
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
&scb_val, sizeof(scb_val));
if (err) {
brcmf_err("Could not get rssi (%d)\n", err);
goto done;
} else {
rssi = le32_to_cpu(scb_val.val);
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
}
}
done:

View File

@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
brcmu_pkt_buf_free_skb(skb);
return;
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
brcmf_netif_rx(ifp, skb);
}

View File

@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
!info->attrs[HWSIM_ATTR_FLAGS] ||
!info->attrs[HWSIM_ATTR_COOKIE] ||
!info->attrs[HWSIM_ATTR_SIGNAL] ||
!info->attrs[HWSIM_ATTR_TX_INFO])
goto out;

View File

@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
void rtl_addr_delay(u32 addr)
{
if (addr == 0xfe)
msleep(50);
mdelay(50);
else if (addr == 0xfd)
msleep(5);
else if (addr == 0xfc)
@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
rtl_addr_delay(addr);
} else {
rtl_set_rfreg(hw, rfpath, addr, mask, data);
usleep_range(1, 2);
udelay(1);
}
}
EXPORT_SYMBOL(rtl_rfreg_delay);
@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
rtl_addr_delay(addr);
} else {
rtl_set_bbreg(hw, addr, MASKDWORD, data);
usleep_range(1, 2);
udelay(1);
}
}
EXPORT_SYMBOL(rtl_bb_delay);

View File

@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
#define MLX5_CMD_OP_MAX 0x920
enum {
VPORT_STATE_DOWN = 0x0,
VPORT_STATE_UP = 0x1,
@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

View File

@ -205,7 +205,8 @@ enum {
MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
MLX5_CMD_OP_MAX
};
struct mlx5_ifc_flow_table_fields_supported_bits {
@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
u8 reserved_at_5[0x1b];
u8 reserved_at_5[0x19];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 reserved_at_20[0x7e0];
};
@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x19];
u8 reserved_at_0[0x16];
u8 node_guid[0x1];
u8 port_guid[0x1];
u8 reserved_at_18[0x1];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];

View File

@ -559,6 +559,7 @@ struct mlx5_modify_qp_mbox_in {
__be32 optparam;
u8 rsvd0[4];
struct mlx5_qp_context ctx;
u8 rsvd2[16];
};
struct mlx5_modify_qp_mbox_out {

View File

@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,

View File

@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
unsigned int);
asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,

View File

@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
const char *ip_vs_state_name(__u16 proto, int state);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);

View File

@ -28,8 +28,8 @@ struct nf_queue_handler {
struct nf_hook_ops *ops);
};
void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(struct net *net);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
void nf_queue_entry_get_refs(struct nf_queue_entry *entry);

View File

@ -5,11 +5,13 @@
struct proc_dir_entry;
struct nf_logger;
struct nf_queue_handler;
struct netns_nf {
#if defined CONFIG_PROC_FS
struct proc_dir_entry *proc_netfilter;
#endif
const struct nf_queue_handler __rcu *queue_handler;
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;

View File

@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
};
};
static inline bool tc_should_offload(struct net_device *dev, u32 flags)
static inline bool tc_should_offload(const struct net_device *dev,
const struct tcf_proto *tp, u32 flags)
{
const struct Qdisc *sch = tp->q;
const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
if (!(dev->features & NETIF_F_HW_TC))
return false;
if (flags & TCA_CLS_FLAGS_SKIP_HW)
return false;
if (!dev->netdev_ops->ndo_setup_tc)
return false;
if (cops && cops->tcf_cl_offload)
return cops->tcf_cl_offload(tp->classid);
return true;
}

View File

@ -168,6 +168,7 @@ struct Qdisc_class_ops {
/* Filter manipulation */
struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
bool (*tcf_cl_offload)(u32 classid);
unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
u32 classid);
void (*unbind_tcf)(struct Qdisc *, unsigned long);
@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
if (!sch->gso_skb) {
sch->gso_skb = sch->dequeue(sch);
if (sch->gso_skb)
if (sch->gso_skb) {
/* it's still part of the queue */
qdisc_qstats_backlog_inc(sch, sch->gso_skb);
sch->q.qlen++;
}
}
return sch->gso_skb;
@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
if (skb) {
sch->gso_skb = NULL;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
skb = sch->dequeue(sch);

View File

@ -1,5 +1,5 @@
#ifndef _UAPI_LINUX_GTP_H_
#define _UAPI_LINUX_GTP_H__
#define _UAPI_LINUX_GTP_H_
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,

View File

@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
file = (struct file *)array->ptrs[index];
file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;
@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
file = (struct file *)array->ptrs[index];
file = READ_ONCE(array->ptrs[index]);
if (unlikely(!file))
return -ENOENT;

View File

@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
* change from under us.
*/
list_for_each_entry(v, &vg->vlan_list, vlist) {
if (!br_vlan_should_use(v))
continue;
f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
if (f && f->is_local && !f->dst)
fdb_delete_local(br, NULL, f);

View File

@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
__scm_destroy(scm);
}
static int do_set_attach_filter(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
{
struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
__get_user(ptr, &fprog32->filter) ||
__put_user(len, &kfprog->len) ||
__put_user(compat_ptr(ptr), &kfprog->filter))
return NULL;
return kfprog;
}
EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
static int do_set_attach_filter(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock_fprog __user *kfprog;
kfprog = get_compat_bpf_fprog(optval);
if (!kfprog)
return -EFAULT;
return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (optname == SO_ATTACH_FILTER)
if (optname == SO_ATTACH_FILTER ||
optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)

View File

@ -47,6 +47,7 @@ nla_put_failure:
* @xstats_type: TLV type for backward compatibility xstats TLV
* @lock: statistics lock
* @d: dumping handle
* @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
* an empty TLV header to the socket buffer for use a container for all
@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
* @type: TLV type for top level statistic TLV
* @lock: statistics lock
* @d: dumping handle
* @padattr: padding attribute
*
* Initializes the dumping handle, grabs the statistic lock and appends
* an empty TLV header to the socket buffer for use a container for all

View File

@ -24,6 +24,7 @@
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include "net-sysfs.h"

View File

@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_filter(sk, skb))
goto drop;
}
if (sk_filter(sk, skb))
goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {

View File

@ -1256,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
if (ret)
return ret;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
tunnel = netdev_priv(dev);
ip6gre_tnl_link_config(tunnel, 1);
@ -1289,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
}
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],

View File

@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst)
{
struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
int err;
dst = ip6_sk_dst_check(sk, dst, fl6);
if (!dst)
dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
if (err)
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
return dst;
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);

View File

@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
fl6.daddr = *gw;
fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
dst_release(dst);

View File

@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
destp = ntohs(inet->inet_dport);
srcp = ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {

View File

@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_filter(sk, skb))
goto drop;
}
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_filter(sk, skb))
goto drop;
udp_csum_pull_header(skb);
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {

View File

@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
struct udp_tunnel_sock_cfg udp_cfg;
struct udp_tunnel_sock_cfg udp_cfg = { };
udp_cfg.sk_user_data = tunnel;
udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;

View File

@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
del_timer_sync(&sta->mesh->plink_timer);
}
/* make sure no readers can access nexthop sta from here on */
mesh_path_flush_by_nexthop(sta);
synchronize_net();
if (changed)
ieee80211_mbss_info_change_notify(sdata, changed);
}

View File

@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
u8 sa_offs, da_offs, pn_offs;
u8 band;
u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
sizeof(rfc1042_header)];
sizeof(rfc1042_header)] __aligned(2);
struct rcu_head rcu_head;
};

View File

@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
* If available, return 1, otherwise invalidate this connection
* template and return 0.
*/
int ip_vs_check_template(struct ip_vs_conn *ct)
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
{
struct ip_vs_dest *dest = ct->dest;
struct netns_ipvs *ipvs = ct->ipvs;
@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
expire_quiescent_template(ipvs, dest)) {
expire_quiescent_template(ipvs, dest) ||
(cdest && (dest != cdest))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
"-> d:%s:%d\n",

View File

@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
/* Check if a template already exists */
ct = ip_vs_ct_in_get(&param);
if (!ct || !ip_vs_check_template(ct)) {
if (!ct || !ip_vs_check_template(ct, NULL)) {
struct ip_vs_scheduler *sched;
/*
@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
vport, &param) < 0)
return NULL;
ct = ip_vs_ct_in_get(&param);
if (!ct) {
/* check if template exists and points to the same dest */
if (!ct || !ip_vs_check_template(ct, dest)) {
ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
IP_VS_CONN_F_TEMPLATE, dest, 0);
if (!ct) {

View File

@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
ftp[i][j].tuple.src.l3num, ports[i]);
ports_c = i;
nf_conntrack_ftp_fini();
return ret;
}

View File

@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
int ret = 0;
struct nf_conntrack_helper *cur;
struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
unsigned int h = helper_hash(&me->tuple);
struct nf_conntrack_helper *cur;
int ret = 0;
BUG_ON(me->expect_policy == NULL);
BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
mutex_lock(&nf_ct_helper_mutex);
hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
cur->tuple.src.l3num == me->tuple.src.l3num &&
cur->tuple.dst.protonum == me->tuple.dst.protonum) {
if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
ret = -EEXIST;
goto out;
}

View File

@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
irc[i].tuple.src.l3num, ports[i]);
ports_c = i;
nf_conntrack_irc_fini();
return ret;
}

View File

@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
if (ret) {
pr_err("failed to register helper for pf: %d port: %d\n",
sane[i][j].tuple.src.l3num, ports[i]);
ports_c = i;
nf_conntrack_sane_fini();
return ret;
}

View File

@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
sip[i][j].tuple.src.l3num, ports[i]);
ports_c = i;
nf_conntrack_sip_fini();
return ret;
}

View File

@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
{ }
};
#define NET_NF_CONNTRACK_MAX 2089
static struct ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",

View File

@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
if (ret) {
pr_err("failed to register helper for pf: %u port: %u\n",
tftp[i][j].tuple.src.l3num, ports[i]);
ports_c = i;
nf_conntrack_tftp_fini();
return ret;
}

View File

@ -26,23 +26,21 @@
* Once the queue is registered it must reinject all packets it
* receives, no matter what.
*/
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
/* return EBUSY when somebody else is registered, return EEXIST if the
* same handler is registered, return 0 in case of success. */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
/* should never happen, we only have one queueing backend in kernel */
WARN_ON(rcu_access_pointer(queue_handler));
rcu_assign_pointer(queue_handler, qh);
WARN_ON(rcu_access_pointer(net->nf.queue_handler));
rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
void nf_unregister_queue_handler(struct net *net)
{
RCU_INIT_POINTER(queue_handler, NULL);
synchronize_rcu();
RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
const struct nf_queue_handler *qh;
rcu_read_lock();
qh = rcu_dereference(queue_handler);
qh = rcu_dereference(net->nf.queue_handler);
if (qh)
qh->nf_hook_drop(net, ops);
rcu_read_unlock();
@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
struct nf_queue_entry *entry = NULL;
const struct nf_afinfo *afinfo;
const struct nf_queue_handler *qh;
struct net *net = state->net;
/* QUEUE == DROP if no one is waiting, to be safe. */
qh = rcu_dereference(queue_handler);
qh = rcu_dereference(net->nf.queue_handler);
if (!qh) {
status = -ESRCH;
goto err;

View File

@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
/* Only accept unspec with dump */
if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
return -EAFNOSUPPORT;
if (!nla[NFTA_SET_TABLE])
return -EINVAL;
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))

View File

@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
if (entskb->tstamp.tv64) {
struct nfqnl_msg_packet_timestamp ts;
struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
ts.sec = cpu_to_be64(kts.tv_sec);
ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
net->nf.proc_netfilter, &nfqnl_file_ops))
return -ENOMEM;
#endif
nf_register_queue_handler(net, &nfqh);
return 0;
}
static void __net_exit nfnl_queue_net_exit(struct net *net)
{
nf_unregister_queue_handler(net);
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}
static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
{
synchronize_rcu();
}
static struct pernet_operations nfnl_queue_net_ops = {
.init = nfnl_queue_net_init,
.exit = nfnl_queue_net_exit,
.id = &nfnl_queue_net_id,
.size = sizeof(struct nfnl_queue_net),
.init = nfnl_queue_net_init,
.exit = nfnl_queue_net_exit,
.exit_batch = nfnl_queue_net_exit_batch,
.id = &nfnl_queue_net_id,
.size = sizeof(struct nfnl_queue_net),
};
static int __init nfnetlink_queue_init(void)
@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
}
register_netdevice_notifier(&nfqnl_dev_notifier);
nf_register_queue_handler(&nfqh);
return status;
cleanup_netlink_notifier:
@ -1529,7 +1536,6 @@ out:
static void __exit nfnetlink_queue_fini(void)
{
nf_unregister_queue_handler();
unregister_netdevice_notifier(&nfqnl_dev_notifier);
nfnetlink_subsys_unregister(&nfqnl_subsys);
netlink_unregister_notifier(&nfqnl_rtnl_notifier);

View File

@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
return -EINVAL;
/* compat_xt_entry match has less strict alignment requirements,
@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
return -EINVAL;
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
target_offset + sizeof(struct xt_standard_target) != next_offset)
XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
return -EINVAL;
return xt_check_entry_match(elems, base + target_offset,
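
The hunks above align the computed end of the standard target before comparing it with next_offset. A minimal userspace sketch of the effect of that change; ALIGN8 and struct fake_target are illustrative stand-ins, not the kernel's XT_ALIGN()/COMPAT_XT_ALIGN() or struct xt_standard_target:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for XT_ALIGN()/COMPAT_XT_ALIGN(): round up to 8 bytes. */
#define ALIGN8(x) (((x) + 7u) & ~7u)

/* Illustrative stand-in for struct xt_standard_target; its sizeof() is
 * deliberately not a multiple of 8 (4 + 5 = 9, padded to 12). */
struct fake_target {
    uint32_t verdict;
    char     pad[5];
};

int main(void)
{
    unsigned int target_offset = 40; /* already 8-byte aligned */
    /* the next entry starts at the aligned boundary */
    unsigned int next_offset = target_offset + ALIGN8(sizeof(struct fake_target));

    /* comparing against the raw size rejects this layout... */
    printf("raw size check:     %s\n",
           target_offset + sizeof(struct fake_target) == next_offset ? "ok" : "mismatch");
    /* ...while aligning first, as the hunks above do, accepts it */
    printf("aligned size check: %s\n",
           ALIGN8(target_offset + sizeof(struct fake_target)) == next_offset ? "ok" : "mismatch");
    return 0;
}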

View File

@ -93,6 +93,7 @@
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include "internal.h"
@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
}
#ifdef CONFIG_COMPAT
static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct packet_sock *po = pkt_sk(sock->sk);
if (level != SOL_PACKET)
return -ENOPROTOOPT;
if (optname == PACKET_FANOUT_DATA &&
po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
optval = (char __user *)get_compat_bpf_fprog(optval);
if (!optval)
return -EFAULT;
optlen = sizeof(struct sock_fprog);
}
return packet_setsockopt(sock, level, optname, optval, optlen);
}
#endif
static int packet_notifier(struct notifier_block *this,
unsigned long msg, void *ptr)
{
@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = {
.shutdown = sock_no_shutdown,
.setsockopt = packet_setsockopt,
.getsockopt = packet_getsockopt,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_packet_setsockopt,
#endif
.sendmsg = packet_sendmsg,
.recvmsg = packet_recvmsg,
.mmap = packet_mmap,

View File

@ -74,6 +74,7 @@ enum {
RDS_CONN_CONNECTING,
RDS_CONN_DISCONNECTING,
RDS_CONN_UP,
RDS_CONN_RESETTING,
RDS_CONN_ERROR,
};
@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_connection *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
/* transport.c */

View File

@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
minfo.fport = inc->i_hdr.h_dport;
}
minfo.flags = 0;
rds_info_copy(iter, &minfo, sizeof(minfo));
}

View File

@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
list_splice_init(&conn->c_retrans, &conn->c_send_queue);
spin_unlock_irqrestore(&conn->c_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_reset);
static int acquire_in_xmit(struct rds_connection *conn)
{

View File

@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
}
/*
* This is the only path that sets tc->t_sock. Send and receive trust that
* it is set. The RDS_CONN_UP bit protects those paths from being
* called while it isn't set.
* rds_tcp_reset_callbacks() switches to the new sock and
* releases the existing tc->t_sock.
*
* The only functions that set tc->t_sock are rds_tcp_set_callbacks
* and rds_tcp_reset_callbacks. Send and receive trust that
* it is set. The absence of RDS_CONN_UP bit protects those paths
* from being called while it isn't set.
*/
void rds_tcp_reset_callbacks(struct socket *sock,
struct rds_connection *conn)
{
struct rds_tcp_connection *tc = conn->c_transport_data;
struct socket *osock = tc->t_sock;
if (!osock)
goto newsock;
/* Need to resolve a duelling SYN between peers.
* We have an outstanding SYN to this peer, which may
* potentially have transitioned to the RDS_CONN_UP state,
* so we must quiesce any send threads before resetting
* c_transport_data. We quiesce these threads by setting
* c_state to something other than RDS_CONN_UP, and then
* waiting for any existing threads in rds_send_xmit to
* complete release_in_xmit(). (Subsequent threads entering
* rds_send_xmit() will bail on !rds_conn_up().)
*
* However an incoming syn-ack at this point would end up
* marking the conn as RDS_CONN_UP, and would again permit
* rds_send_xmit() threads through, so ideally we would
* synchronize on RDS_CONN_UP after lock_sock(), but cannot
* do that: waiting on !RDS_IN_XMIT after lock_sock() may
* end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
* would not get set. As a result, we set c_state to
* RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
* cannot mark rds_conn_path_up() in the window before lock_sock()
*/
atomic_set(&conn->c_state, RDS_CONN_RESETTING);
wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
lock_sock(osock->sk);
/* reset receive side state for rds_tcp_data_recv() for osock */
if (tc->t_tinc) {
rds_inc_put(&tc->t_tinc->ti_inc);
tc->t_tinc = NULL;
}
tc->t_tinc_hdr_rem = sizeof(struct rds_header);
tc->t_tinc_data_rem = 0;
tc->t_sock = NULL;
write_lock_bh(&osock->sk->sk_callback_lock);
osock->sk->sk_user_data = NULL;
osock->sk->sk_data_ready = tc->t_orig_data_ready;
osock->sk->sk_write_space = tc->t_orig_write_space;
osock->sk->sk_state_change = tc->t_orig_state_change;
write_unlock_bh(&osock->sk->sk_callback_lock);
release_sock(osock->sk);
sock_release(osock);
newsock:
rds_send_reset(conn);
lock_sock(sock->sk);
write_lock_bh(&sock->sk->sk_callback_lock);
tc->t_sock = sock;
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = rds_tcp_data_ready;
sock->sk->sk_write_space = rds_tcp_write_space;
sock->sk->sk_state_change = rds_tcp_state_change;
write_unlock_bh(&sock->sk->sk_callback_lock);
release_sock(sock->sk);
}
/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
* above rds_tcp_reset_callbacks for notes about synchronization
* with data path
*/
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
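
The comment block in the hunk above describes the quiesce step: move the connection out of RDS_CONN_UP so new senders bail out, then wait for any sender still inside the transmit path to drop its in-flight flag before the socket is swapped. A minimal userspace sketch of that pattern, with a pthreads condition variable standing in for the kernel's wait_event()/c_waitq; all names below are illustrative, not taken from the RDS code:

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-ins for conn->c_state, RDS_IN_XMIT and conn->c_waitq. */
enum conn_state { CONN_UP, CONN_RESETTING };

struct fake_conn {
    pthread_mutex_t lock;
    pthread_cond_t  waitq;
    enum conn_state state;
    bool            in_xmit; /* held by a sender while it owns the transmit path */
};

/* Sender side: only enter the transmit path while the connection is up. */
static bool try_acquire_xmit(struct fake_conn *c)
{
    bool ok = false;

    pthread_mutex_lock(&c->lock);
    if (c->state == CONN_UP && !c->in_xmit) {
        c->in_xmit = true;
        ok = true;
    }
    pthread_mutex_unlock(&c->lock);
    return ok;
}

static void release_xmit(struct fake_conn *c)
{
    pthread_mutex_lock(&c->lock);
    c->in_xmit = false;
    pthread_cond_broadcast(&c->waitq); /* wake a waiter that wants to quiesce */
    pthread_mutex_unlock(&c->lock);
}

/* Reset side: flip the state first so new senders bail out, then wait for any
 * in-flight sender to finish before touching the socket. */
static void quiesce_before_reset(struct fake_conn *c)
{
    pthread_mutex_lock(&c->lock);
    c->state = CONN_RESETTING;
    while (c->in_xmit)
        pthread_cond_wait(&c->waitq, &c->lock);
    pthread_mutex_unlock(&c->lock);
    /* safe to swap sockets and reset callbacks from here */
}

int main(void)
{
    struct fake_conn c = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .waitq = PTHREAD_COND_INITIALIZER,
        .state = CONN_UP,
        .in_xmit = false,
    };

    if (try_acquire_xmit(&c))
        release_xmit(&c);
    quiesce_before_reset(&c);
    return 0;
}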

View File

@ -50,6 +50,7 @@ struct rds_tcp_statistics {
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
void rds_tcp_restore_callbacks(struct socket *sock,
struct rds_tcp_connection *tc);
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);

View File

@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk)
case TCP_SYN_RECV:
break;
case TCP_ESTABLISHED:
rds_connect_complete(conn);
rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
break;
case TCP_CLOSE_WAIT:
case TCP_CLOSE:

View File

@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp = NULL;
int conn_state;
struct sock *nsk;
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
!conn->c_outgoing) {
goto rst_nsk;
} else {
atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
wait_event(conn->c_waitq,
!test_bit(RDS_IN_XMIT, &conn->c_flags));
rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
rds_tcp_reset_callbacks(new_sock, conn);
conn->c_outgoing = 0;
/* rds_connect_path_complete() marks RDS_CONN_UP */
rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING);
}
} else {
rds_tcp_set_callbacks(new_sock, conn);
rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
}
rds_tcp_set_callbacks(new_sock, conn);
rds_connect_complete(conn); /* marks RDS_CONN_UP */
new_sock = NULL;
ret = 0;
goto out;
rst_nsk:
/* reset the newly returned accept sock and bail */
nsk = new_sock->sk;
rds_tcp_stats_inc(s_tcp_listen_closed_stale);
nsk->sk_user_data = NULL;
nsk->sk_prot->disconnect(nsk, 0);
tcp_done(nsk);
new_sock = NULL;
kernel_sock_shutdown(new_sock, SHUT_RDWR);
ret = 0;
out:
if (rs_tcp)

View File

@ -71,9 +71,9 @@
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);
void rds_connect_complete(struct rds_connection *conn)
void rds_connect_path_complete(struct rds_connection *conn, int curr)
{
if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
printk(KERN_WARNING "%s: Cannot transition to state UP, "
"current state is %d\n",
__func__,
@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
EXPORT_SYMBOL_GPL(rds_connect_path_complete);
void rds_connect_complete(struct rds_connection *conn)
{
rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);
/*

View File

@ -1162,9 +1162,7 @@ static int rxkad_init(void)
/* pin the cipher we need so that the crypto layer doesn't invoke
* keventd to go get it */
rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(rxkad_ci))
return PTR_ERR(rxkad_ci);
return 0;
return PTR_ERR_OR_ZERO(rxkad_ci);
}
/*

View File

@ -38,7 +38,7 @@ struct tcf_police {
bool peak_present;
};
#define to_police(pc) \
container_of(pc, struct tcf_police, common)
container_of(pc->priv, struct tcf_police, common)
#define POL_TAB_MASK 15
@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *a,
int ovr, int bind)
{
unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
struct tcf_hashinfo *hinfo = tn->hinfo;
int size;
if (nla == NULL)
@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
if (parm->index) {
if (tcf_hash_search(tn, a, parm->index)) {
police = to_police(a->priv);
police = to_police(a);
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
/* not replacing */
return -EEXIST;
}
} else {
ret = tcf_hash_create(tn, parm->index, NULL, a,
sizeof(*police), bind, false);
if (ret)
return ret;
ret = ACT_P_CREATED;
}
police = kzalloc(sizeof(*police), GFP_KERNEL);
if (police == NULL)
return -ENOMEM;
ret = ACT_P_CREATED;
police->tcf_refcnt = 1;
spin_lock_init(&police->tcf_lock);
if (bind)
police->tcf_bindcnt = 1;
police = to_police(a);
override:
if (parm->rate.rate) {
err = -ENOMEM;
@ -237,16 +234,8 @@ override:
return ret;
police->tcfp_t_c = ktime_get_ns();
police->tcf_index = parm->index ? parm->index :
tcf_hash_new_index(tn);
police->tcf_tm.install = jiffies;
police->tcf_tm.lastuse = jiffies;
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
spin_lock_bh(&hinfo->lock);
hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
spin_unlock_bh(&hinfo->lock);
tcf_hash_insert(tn, a);
a->priv = police;
return ret;
failure_unlock:
@ -255,7 +244,7 @@ failure:
qdisc_put_rtab(P_tab);
qdisc_put_rtab(R_tab);
if (ret == ACT_P_CREATED)
kfree(police);
tcf_hash_cleanup(a, est);
return err;
}

View File

@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
if (!tc_should_offload(dev, 0))
if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_DESTROY;
@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
if (!tc_should_offload(dev, flags))
if (!tc_should_offload(dev, tp, flags))
return;
offload.command = TC_CLSFLOWER_REPLACE;
@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
struct tc_cls_flower_offload offload = {0};
struct tc_to_netdev tc;
if (!tc_should_offload(dev, 0))
if (!tc_should_offload(dev, tp, 0))
return;
offload.command = TC_CLSFLOWER_STATS;

View File

@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
if (tc_should_offload(dev, 0)) {
if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
offload.cls_u32->knode.handle = handle;
dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
struct tc_to_netdev offload;
int err;
if (!tc_should_offload(dev, tp, flags))
return tc_skip_sw(flags) ? -EINVAL : 0;
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
if (tc_should_offload(dev, flags)) {
offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
offload.cls_u32->hnode.prio = h->prio;
offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
offload.cls_u32->hnode.prio = h->prio;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
tp->protocol, &offload);
if (tc_skip_sw(flags))
return err;
}
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
tp->protocol, &offload);
if (tc_skip_sw(flags))
return err;
return 0;
}
@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
if (tc_should_offload(dev, 0)) {
if (tc_should_offload(dev, tp, 0)) {
offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
offload.cls_u32->hnode.divisor = h->divisor;
offload.cls_u32->hnode.handle = h->handle;
@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
offload.type = TC_SETUP_CLSU32;
offload.cls_u32 = &u32_offload;
if (tc_should_offload(dev, flags)) {
offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
offload.cls_u32->knode.handle = n->handle;
offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
offload.cls_u32->knode.val = n->val;
offload.cls_u32->knode.mask = n->mask;
#else
offload.cls_u32->knode.val = 0;
offload.cls_u32->knode.mask = 0;
#endif
offload.cls_u32->knode.sel = &n->sel;
offload.cls_u32->knode.exts = &n->exts;
if (n->ht_down)
offload.cls_u32->knode.link_handle = n->ht_down->handle;
if (!tc_should_offload(dev, tp, flags))
return tc_skip_sw(flags) ? -EINVAL : 0;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
tp->protocol, &offload);
if (tc_skip_sw(flags))
return err;
}
offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
offload.cls_u32->knode.handle = n->handle;
offload.cls_u32->knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
offload.cls_u32->knode.val = n->val;
offload.cls_u32->knode.mask = n->mask;
#else
offload.cls_u32->knode.val = 0;
offload.cls_u32->knode.mask = 0;
#endif
offload.cls_u32->knode.sel = &n->sel;
offload.cls_u32->knode.exts = &n->exts;
if (n->ht_down)
offload.cls_u32->knode.link_handle = n->ht_down->handle;
err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
tp->protocol, &offload);
if (tc_skip_sw(flags))
return err;
return 0;
}
@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (tb[TCA_U32_FLAGS]) {
flags = nla_get_u32(tb[TCA_U32_FLAGS]);
if (!tc_flags_valid(flags))
return err;
return -EINVAL;
}
n = (struct tc_u_knode *)*arg;
@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
err = u32_replace_hw_hnode(tp, ht, flags);
if (err) {
kfree(ht);
return err;
}
RCU_INIT_POINTER(ht->next, tp_c->hlist);
rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
u32_replace_hw_hnode(tp, ht, flags);
return 0;
}

View File

@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->deficit = cl->quantum;
}
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return err;
}
@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
if (cl->qdisc->ops->drop) {
len = cl->qdisc->ops->drop(cl->qdisc);
if (len > 0) {
sch->qstats.backlog -= len;
sch->q.qlen--;
if (cl->qdisc->q.qlen == 0)
list_del(&cl->alist);
@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
qdisc_reset(cl->qdisc);
}
}
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}

View File

@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
unsigned int idx, prev_backlog, prev_qlen;
struct fq_codel_flow *flow;
int uninitialized_var(ret);
unsigned int pkt_len;
bool memory_limited;
idx = fq_codel_classify(skb, sch, &ret);
@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
prev_backlog = sch->qstats.backlog;
prev_qlen = sch->q.qlen;
/* save this packet length as it might be dropped by fq_codel_drop() */
pkt_len = qdisc_pkt_len(skb);
/* fq_codel_drop() is quite expensive, as it performs a linear search
* in q->backlogs[] to find a fat flow.
* So instead of dropping a single packet, drop half of its backlog
@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
*/
ret = fq_codel_drop(sch, q->drop_batch_size);
q->drop_overlimit += prev_qlen - sch->q.qlen;
prev_qlen -= sch->q.qlen;
prev_backlog -= sch->qstats.backlog;
q->drop_overlimit += prev_qlen;
if (memory_limited)
q->drop_overmemory += prev_qlen - sch->q.qlen;
/* As we dropped packet(s), better let upper stack know this */
qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
prev_backlog - sch->qstats.backlog);
q->drop_overmemory += prev_qlen;
return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
/* As we dropped packet(s), better let upper stack know this.
* If we dropped a packet for this flow, return NET_XMIT_CN,
* but in this case, our parents won't increase their backlogs.
*/
if (ret == idx) {
qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
prev_backlog - pkt_len);
return NET_XMIT_CN;
}
qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
return NET_XMIT_SUCCESS;
}
/* This is the specific function called from codel_dequeue()
@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
}
if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));

View File

@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
q->gso_skb = skb;
q->qstats.requeues++;
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
} else
skb = NULL;

View File

@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
q->eligible = RB_ROOT;
INIT_LIST_HEAD(&q->droplist);
qdisc_watchdog_cancel(&q->watchdog);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}
@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
struct hfsc_sched *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
struct tc_hfsc_qopt qopt;
struct hfsc_class *cl;
unsigned int i;
sch->qstats.backlog = 0;
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
sch->qstats.backlog += cl->qdisc->qstats.backlog;
}
qopt.defcls = q->defcls;
if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (cl->qdisc->q.qlen == 1)
set_active(cl, qdisc_pkt_len(skb));
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
}
cl->qstats.drops++;
qdisc_qstats_drop(sch);
sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}

View File

@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
return TC_H_MIN(classid) + 1;
}
static bool ingress_cl_offload(u32 classid)
{
return true;
}
static unsigned long ingress_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
.tcf_cl_offload = ingress_cl_offload,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
};
@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
}
}
static bool clsact_cl_offload(u32 classid)
{
return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
}
static unsigned long clsact_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = clsact_find_tcf,
.tcf_cl_offload = clsact_cl_offload,
.bind_tcf = clsact_bind_filter,
.unbind_tcf = ingress_put,
};

View File

@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, qdisc);
if (ret == NET_XMIT_SUCCESS) {
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
if (skb) {
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
return skb;
}
@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
for (prio = q->bands-1; prio >= 0; prio--) {
qdisc = q->queues[prio];
if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
}

View File

@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
err = qfq_change_agg(sch, cl, cl->agg->class_weight,
qdisc_pkt_len(skb));
if (err)
return err;
if (err) {
cl->qstats.drops++;
return qdisc_drop(skb, sch);
}
}
err = qdisc_enqueue(skb, cl->qdisc);

View File

@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
skb = child->dequeue(child);
if (skb) {
qdisc_bstats_update(sch, skb);
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
if (!red_is_idling(&q->vars))
@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
qdisc_qstats_drop(sch);
sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
struct red_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
red_restart(&q->vars);
}

View File

@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
return NET_XMIT_SUCCESS;
}
@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
unsigned int len = 0;
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
sch->qstats.backlog -= len;
sch->q.qlen--;
qdisc_qstats_drop(sch);
}
@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
q->t_c = now;
q->tokens = toks;
q->ptokens = ptoks;
qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
struct tbf_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
sch->qstats.backlog = 0;
sch->q.qlen = 0;
q->t_c = ktime_get_ns();
q->tokens = q->buffer;

View File

@ -604,7 +604,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
&link_info, sizeof(link_info));

View File

@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
WARN_ON(ops->set_tx_power && !ops->get_tx_power);
WARN_ON(ops->set_antenna && !ops->get_antenna);
alloc_size = sizeof(*rdev) + sizeof_priv;

View File

@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
if (dev->netdev_ops->ndo_do_ioctl)
return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
if (dev->netdev_ops->ndo_do_ioctl) {
#ifdef CONFIG_COMPAT
if (info->flags & IW_REQUEST_FLAG_COMPAT) {
int ret = 0;
struct iwreq iwr_lcl;
struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
iwr_lcl.u.data.length = iwp_compat->length;
iwr_lcl.u.data.flags = iwp_compat->flags;
ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
iwp_compat->length = iwr_lcl.u.data.length;
iwp_compat->flags = iwr_lcl.u.data.flags;
return ret;
} else
#endif
return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
}
return -EOPNOTSUPP;
}
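
The hunk above copies the 32-bit iw_point layout into a local native iwreq before calling the driver ioctl handler and writes the fields back afterwards. A minimal userspace sketch of that widen-call-copy-back round trip; the struct layouts and names below are illustrative, not the actual wext or compat definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout of the data argument as a 32-bit task passes it. */
struct compat_point {
    uint32_t pointer; /* 32-bit user pointer */
    uint16_t length;
    uint16_t flags;
};

/* Illustrative native layout with a real pointer, as the handler expects. */
struct native_point {
    void    *pointer;
    uint16_t length;
    uint16_t flags;
};

/* Stand-in for the 64-bit driver handler. */
static int fake_handler(struct native_point *p)
{
    p->length = 32; /* pretend the driver updated the length */
    return 0;
}

/* Widen the compat layout, call the handler on a local native copy, then
 * write the possibly-updated fields back, the same round trip the fix
 * performs with compat_ptr()/ptr_to_compat() on a local struct iwreq. */
static int call_from_compat(struct compat_point *cp)
{
    struct native_point lcl;
    int ret;

    lcl.pointer = (void *)(uintptr_t)cp->pointer;
    lcl.length  = cp->length;
    lcl.flags   = cp->flags;

    ret = fake_handler(&lcl);

    cp->pointer = (uint32_t)(uintptr_t)lcl.pointer;
    cp->length  = lcl.length;
    cp->flags   = lcl.flags;
    return ret;
}

int main(void)
{
    struct compat_point cp = { .pointer = 0x1000, .length = 16, .flags = 0 };

    call_from_compat(&cp);
    printf("length after call: %u\n", cp.length);
    return 0;
}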

View File

@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod)
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insn_cnt = ARRAY_SIZE(prog);
attr.insns = (uint64_t)prog;
attr.license = (uint64_t)bpf_license;
attr.log_buf = (uint64_t)bpf_log_buf;
attr.insns = (unsigned long) &prog;
attr.license = (unsigned long) &bpf_license;
attr.log_buf = (unsigned long) &bpf_log_buf;
attr.log_size = sizeof(bpf_log_buf);
attr.log_level = 1;
attr.kern_version = 0;
@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p)
memset(&eprog, 0, sizeof(eprog));
eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
eprog.insn_cnt = ARRAY_SIZE(ecode);
eprog.insns = (uint64_t)ecode;
eprog.license = (uint64_t)bpf_license;
eprog.insns = (unsigned long) &ecode;
eprog.license = (unsigned long) &bpf_license;
eprog.kern_version = 0;
memset(&cprog, 0, sizeof(cprog));