Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Handle v4/v6 mixed sockets properly in soreuseport, from Craig
    Gallek.

 2) Bug fixes for the new macsec facility (missing kmalloc NULL checks,
    missing locking around netdev list traversal, etc.) from Sabrina
    Dubroca.

 3) Fix handling of host routes on ifdown in ipv6, from David Ahern.

 4) Fix double-fdput in bpf verifier, from Jann Horn.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (31 commits)
  bpf: fix double-fdput in replace_map_fd_with_map_ptr()
  net: ipv6: Delete host routes on an ifdown
  Revert "ipv6: Revert optional address flusing on ifdown."
  net/mlx4_en: fix spurious timestamping callbacks
  net: dummy: remove note about being Y by default
  cxgbi: fix uninitialized flowi6
  ipv6: Revert optional address flusing on ifdown.
  ipv4/fib: don't warn when primary address is missing if in_dev is dead
  net/mlx5: Add pci shutdown callback
  net/mlx5_core: Remove static from local variable
  net/mlx5e: Use vport MTU rather than physical port MTU
  net/mlx5e: Fix minimum MTU
  net/mlx5e: Device's mtu field is u16 and not int
  net/mlx5_core: Add ConnectX-5 to list of supported devices
  net/mlx5e: Fix MLX5E_100BASE_T define
  net/mlx5_core: Fix soft lockup in steering error flow
  qlcnic: Update version to 5.3.64
  net: stmmac: socfpga: Remove re-registration of reset controller
  macsec: fix netlink attribute validation
  macsec: add missing macsec prefix in uapi
  ...
commit f28f20da70
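The double-fdput fix at the top of the shortlog is the classic double-release bug: an error path dropped a file reference that a shared cleanup path drops again. A minimal userspace C sketch of the bug class and the fix shape (illustrative stand-ins only, not the kernel code; fdput() is the kernel's file-reference drop):

#include <stdio.h>

/* Stand-in for the verifier's map-validation step. The buggy version
 * also closed the handle here before returning, so the caller's
 * cleanup released the same reference a second time. */
static int validate(FILE *f)
{
	return fgetc(f) == EOF ? -1 : 0;
}

int main(void)
{
	FILE *f = fopen("/dev/null", "r");
	int err;

	if (!f)
		return 1;
	err = validate(f);
	fclose(f);	/* fix: exactly one owner drops the reference */
	return err ? 1 : 0;
}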
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11071,6 +11071,15 @@ S:	Maintained
 F:	drivers/clk/ti/
 F:	include/linux/clk/ti.h
 
+TI ETHERNET SWITCH DRIVER (CPSW)
+M:	Mugunthan V N <mugunthanvnm@ti.com>
+R:	Grygorii Strashko <grygorii.strashko@ti.com>
+L:	linux-omap@vger.kernel.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/ethernet/ti/cpsw*
+F:	drivers/net/ethernet/ti/davinci*
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:	Alex Dubov <oakad@yahoo.com>
 S:	Maintained
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_hca_vport_context *rep;
-	int max_mtu;
-	int oper_mtu;
+	u16 max_mtu;
+	u16 oper_mtu;
 	int err;
 	u8 ib_link_width_oper;
 	u8 vl_hw_cap;
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
 	  this device is consigned into oblivion) with a configurable IP
 	  address. It is most commonly used in order to make your currently
 	  inactive SLIP address seem like a real address for local programs.
-	  If you use SLIP or PPP, you might want to say Y here. Since this
-	  thing often comes in handy, the default is Y. It won't enlarge your
-	  kernel either. What a deal. Read about it in the Network
+	  If you use SLIP or PPP, you might want to say Y here. It won't
+	  enlarge your kernel. What a deal. Read about it in the Network
 	  Administrator's Guide, available from
 	  <http://www.tldp.org/docs.html#guide>.
 
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 	u32 packets = 0;
 	u32 bytes = 0;
 	int factor = priv->cqe_factor;
-	u64 timestamp = 0;
 	int done = 0;
 	int budget = priv->tx_work_limit;
 	u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 	new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
 	do {
+		u64 timestamp = 0;
+
 		txbbs_skipped += last_nr_txbb;
 		ring_index = (ring_index + last_nr_txbb) & size_mask;
-		if (ring->tx_info[ring_index].ts_requested)
+
+		if (unlikely(ring->tx_info[ring_index].ts_requested))
 			timestamp = mlx4_en_get_cqe_ts(cqe);
 
 		/* free next descriptor */
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
 	MLX5E_100GBASE_KR4 = 22,
 	MLX5E_100GBASE_LR4 = 23,
 	MLX5E_100BASE_TX   = 24,
-	MLX5E_100BASE_T    = 25,
+	MLX5E_1000BASE_T   = 25,
 	MLX5E_10GBASE_T    = 26,
 	MLX5E_25GBASE_CR   = 27,
 	MLX5E_25GBASE_KR   = 28,
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
 	[MLX5E_100BASE_TX] = {
 		.speed      = 100,
 	},
-	[MLX5E_100BASE_T] = {
-		.supported  = SUPPORTED_100baseT_Full,
-		.advertised = ADVERTISED_100baseT_Full,
-		.speed      = 100,
+	[MLX5E_1000BASE_T] = {
+		.supported  = SUPPORTED_1000baseT_Full,
+		.advertised = ADVERTISED_1000baseT_Full,
+		.speed      = 1000,
 	},
 	[MLX5E_10GBASE_T] = {
 		.supported  = SUPPORTED_10000baseT_Full,
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
 	return 0;
 }
 
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
-	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
-	int hw_mtu;
+	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
 	int err;
 
-	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
 	if (err)
 		return err;
 
-	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+	/* Update vport context MTU */
+	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+	return 0;
+}
 
-	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = 0;
+	int err;
 
-	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+	if (err || !hw_mtu) /* fallback to port oper mtu */
+		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u16 mtu;
+	int err;
+
+	err = mlx5e_set_mtu(priv, netdev->mtu);
+	if (err)
+		return err;
+
+	mlx5e_query_mtu(priv, &mtu);
+	if (mtu != netdev->mtu)
+		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+			    __func__, mtu, netdev->mtu);
+
+	netdev->mtu = mtu;
 	return 0;
 }
 
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
 	return err;
 }
 
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	bool was_opened;
-	int max_mtu;
+	u16 max_mtu;
+	u16 min_mtu;
 	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
 	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+	min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-	if (new_mtu > max_mtu) {
+	if (new_mtu > max_mtu || new_mtu < min_mtu) {
 		netdev_err(netdev,
-			   "%s: Bad MTU (%d) > (%d) Max\n",
-			   __func__, new_mtu, max_mtu);
+			   "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+			   __func__, new_mtu, min_mtu, max_mtu);
 		return -EINVAL;
 	}
 
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	schedule_work(&priv->set_rx_mode_work);
 	mlx5e_disable_async_events(priv);
 	flush_scheduled_work();
-	unregister_netdev(netdev);
+	if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+		netif_device_detach(netdev);
+		mutex_lock(&priv->state_lock);
+		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+			mlx5e_close_locked(netdev);
+		mutex_unlock(&priv->state_lock);
+	} else {
+		unregister_netdev(netdev);
+	}
+
 	mlx5e_tc_cleanup(priv);
 	mlx5e_vxlan_cleanup(priv);
 	mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-	free_netdev(netdev);
+
+	if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+		free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
 	return rule;
 }
 
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
-						  u8 match_criteria_enable,
-						  u32 *match_criteria,
-						  u32 *match_value,
-						  u8 action,
-						  u32 flow_tag,
-						  struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_rule *rule;
-	struct mlx5_flow_group *g;
-
-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
-	if (IS_ERR(g))
-		return (void *)g;
-
-	rule = add_rule_fg(g, match_value,
-			   action, flow_tag, dest);
-	if (IS_ERR(rule)) {
-		/* Remove assumes refcount > 0 and autogroup creates a group
-		 * with a refcount = 0.
-		 */
-		tree_get_node(&g->node);
-		tree_remove_node(&g->node);
-	}
-	return rule;
-}
-
 static struct mlx5_flow_rule *
 _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		    u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		goto unlock;
 	}
 
-	rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
-				   match_value, action, flow_tag, dest);
+	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+	if (IS_ERR(g)) {
+		rule = (void *)g;
+		goto unlock;
+	}
+
+	rule = add_rule_fg(g, match_value,
+			   action, flow_tag, dest);
+	if (IS_ERR(rule)) {
+		/* Remove assumes refcount > 0 and autogroup creates a group
+		 * with a refcount = 0.
+		 */
+		unlock_ref_node(&ft->node);
+		tree_get_node(&g->node);
+		tree_remove_node(&g->node);
+		return rule;
+	}
 unlock:
 	unlock_ref_node(&ft->node);
 	return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 {
 	struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
 	int prio;
-	static struct fs_prio *fs_prio;
+	struct fs_prio *fs_prio;
 	struct mlx5_flow_namespace *ns;
 
 	if (!root_ns)
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	int err;
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
 			 __func__);
 		goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	if (err)
 		pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
-	dev->interface_state = MLX5_INTERFACE_STATE_UP;
+	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
 
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	}
 
 	mutex_lock(&dev->intf_state_mutex);
-	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
 			 __func__);
 		goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_cmd_cleanup(dev);
 
 out:
-	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 	mutex_unlock(&dev->intf_state_mutex);
 	return err;
 }
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
 	.resume		= mlx5_pci_resume
 };
 
+static void shutdown(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "Shutdown was called\n");
+	/* Notify mlx5 clients that the kernel is being shut down */
+	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+	mlx5_unload_one(dev, priv);
+	mlx5_pci_disable_device(dev);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1011) },		/* Connect-IB */
 	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
 	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5 */
+	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
 	{ 0, }
 };
 
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
 	.id_table	= mlx5_core_pci_table,
 	.probe		= init_one,
 	.remove		= remove_one,
+	.shutdown	= shutdown,
 	.err_handler	= &mlx5_err_handler,
 	.sriov_configure   = mlx5_core_sriov_configure,
 };
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-				int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+				u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 	*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
 			     u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port)
 {
 	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
 
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u32 *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*mtu = MLX5_GET(query_nic_vport_context_out, out,
+				nic_vport_context.mtu);
+
+	kvfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
 				  u32 vport,
 				  enum mlx5_list_type list_type,
|
||||
|
||||
#define _QLCNIC_LINUX_MAJOR 5
|
||||
#define _QLCNIC_LINUX_MINOR 3
|
||||
#define _QLCNIC_LINUX_SUBVERSION 63
|
||||
#define QLCNIC_LINUX_VERSIONID "5.3.63"
|
||||
#define _QLCNIC_LINUX_SUBVERSION 64
|
||||
#define QLCNIC_LINUX_VERSIONID "5.3.64"
|
||||
#define QLCNIC_DRV_IDC_VER 0x01
|
||||
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
|
||||
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
|
||||
|
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -49,7 +49,6 @@ struct socfpga_dwmac {
 	u32	reg_shift;
 	struct	device *dev;
 	struct regmap *sys_mgr_base_addr;
-	struct reset_control *stmmac_rst;
 	void __iomem *splitter_base;
 	bool f2h_ptp_ref_clk;
 };
@@ -92,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
 	struct device_node *np_splitter;
 	struct resource res_splitter;
 
-	dwmac->stmmac_rst = devm_reset_control_get(dev,
-						   STMMAC_RESOURCE_NAME);
-	if (IS_ERR(dwmac->stmmac_rst)) {
-		dev_info(dev, "Could not get reset control!\n");
-		if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		dwmac->stmmac_rst = NULL;
-	}
-
 	dwmac->interface = of_get_phy_mode(np);
 
 	sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -194,30 +184,23 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
 	return 0;
 }
 
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
-	struct socfpga_dwmac *dwmac = priv;
-
-	/* On socfpga platform exit, assert and hold reset to the
-	 * enet controller - the default state after a hard reset.
-	 */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
-}
-
 static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
 {
-	struct socfpga_dwmac	*dwmac = priv;
+	struct socfpga_dwmac *dwmac = priv;
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct stmmac_priv *stpriv = NULL;
 	int ret = 0;
 
-	if (ndev)
-		stpriv = netdev_priv(ndev);
+	if (!ndev)
+		return -EINVAL;
+
+	stpriv = netdev_priv(ndev);
+	if (!stpriv)
+		return -EINVAL;
 
 	/* Assert reset to the enet controller before changing the phy mode */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
+	if (stpriv->stmmac_rst)
+		reset_control_assert(stpriv->stmmac_rst);
 
 	/* Setup the phy mode in the system manager registers according to
 	 * devicetree configuration
@@ -227,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
 	/* Deassert reset for the phy configuration to be sampled by
 	 * the enet controller, and operation to start in requested mode
 	 */
-	if (dwmac->stmmac_rst)
-		reset_control_deassert(dwmac->stmmac_rst);
+	if (stpriv->stmmac_rst)
+		reset_control_deassert(stpriv->stmmac_rst);
 
 	/* Before the enet controller is suspended, the phy is suspended.
 	 * This causes the phy clock to be gated. The enet controller is
@@ -245,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
 	 * control register 0, and can be modified by the phy driver
 	 * framework.
 	 */
-	if (stpriv && stpriv->phydev)
+	if (stpriv->phydev)
 		phy_resume(stpriv->phydev);
 
 	return ret;
@@ -285,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
 
 	plat_dat->bsp_priv = dwmac;
-	plat_dat->init = socfpga_dwmac_init;
-	plat_dat->exit = socfpga_dwmac_exit;
 	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
-	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
-	if (ret)
-		return ret;
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (!ret)
+		ret = socfpga_dwmac_init(pdev, dwmac);
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	return ret;
 }
 
 static const struct of_device_id socfpga_dwmac_match[] = {
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	macsec_skb_cb(skb)->valid = false;
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
 	if (!req) {
 		kfree_skb(skb);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (!skb) {
 			aead_request_free(req);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 	} else {
 		/* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 	dev_hold(dev);
 	ret = crypto_aead_decrypt(req);
 	if (ret == -EINPROGRESS) {
-		return NULL;
+		return ERR_PTR(ret);
 	} else if (ret != 0) {
 		/* decryption/authentication failed
 		 * 10.6 if validateFrames is disabled, deliver anyway
 		 */
 		if (ret != -EBADMSG) {
 			kfree_skb(skb);
-			skb = NULL;
+			skb = ERR_PTR(ret);
 		}
 	} else {
 		macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
 		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
 
-	if (!skb) {
-		macsec_rxsa_put(rx_sa);
+	if (IS_ERR(skb)) {
+		/* the decrypt callback needs the reference */
+		if (PTR_ERR(skb) != -EINPROGRESS)
+			macsec_rxsa_put(rx_sa);
 		rcu_read_unlock();
 		*pskb = NULL;
 		return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
 			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
 	macsec_reset_skb(skb, secy->netdev);
 
-	macsec_rxsa_put(rx_sa);
+	if (rx_sa)
+		macsec_rxsa_put(rx_sa);
 	count_rx(dev, skb->len);
 
 	rcu_read_unlock();
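The macsec_decrypt() hunks above replace bare NULL returns with the kernel's ERR_PTR convention, so macsec_handle_frame() can tell an async decrypt still in flight (-EINPROGRESS, where the crypto callback keeps the rx_sa reference) from a real failure. A self-contained userspace rendering of that convention (the macros are re-implemented here only for illustration):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* minimal re-implementation of the kernel's err-pointer helpers */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for macsec_decrypt(): encode the reason in the pointer */
static void *decrypt(int async)
{
	return async ? ERR_PTR(-EINPROGRESS) : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *skb = decrypt(1);

	if (IS_ERR(skb)) {
		/* mirrors the handle_frame fix: keep the reference when
		 * the async callback will finish the job */
		if (PTR_ERR(skb) != -EINPROGRESS)
			printf("drop rx_sa reference\n");
	}
	return 0;
}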
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 	}
 
 	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
-	if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
-		       secy->icv_len)) {
+	if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+				 secy->key_len, secy->icv_len)) {
+		kfree(rx_sa);
 		rtnl_unlock();
 		return -ENOMEM;
 	}
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
 	if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
 			 secy->key_len, secy->icv_len)) {
+		kfree(tx_sa);
 		rtnl_unlock();
 		return -ENOMEM;
 	}
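Both add_rxsa/add_txsa hunks above converge on the same shape: fold the previously missing kmalloc() NULL check into the init test and free unconditionally on the error path, which is safe because kfree(NULL) is a no-op, exactly like free(NULL) in userspace. A minimal sketch of that pattern, with illustrative names:

#include <stdlib.h>
#include <string.h>

struct rx_sa { unsigned char key[16]; };

static int init_rx_sa(struct rx_sa *sa, const void *key, size_t len)
{
	if (len > sizeof(sa->key))
		return -1;
	memcpy(sa->key, key, len);
	return 0;
}

int main(void)
{
	static const unsigned char key[16] = { 1, 2, 3, 4 };
	struct rx_sa *rx_sa = malloc(sizeof(*rx_sa));

	/* one error path covers both allocation and init failure */
	if (!rx_sa || init_rx_sa(rx_sa, key, sizeof(key))) {
		free(rx_sa);	/* free(NULL) is defined to do nothing */
		return 1;
	}
	free(rx_sa);
	return 0;
}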
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
 		return 1;
 
 	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
-	    nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+	    nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+			MACSEC_DEFAULT_CIPHER_ID) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
 	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	if (!hdr)
 		return -EMSGSIZE;
 
-	rtnl_lock();
+	genl_dump_check_consistent(cb, hdr, &macsec_fam);
 
 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
 	nla_nest_end(skb, rxsc_list);
 
-	rtnl_unlock();
-
 	genlmsg_end(skb, hdr);
 
 	return 0;
 
 nla_put_failure:
-	rtnl_unlock();
 	genlmsg_cancel(skb, hdr);
 	return -EMSGSIZE;
 }
 
+static int macsec_generation = 1; /* protected by RTNL */
+
 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
 	dev_idx = cb->args[0];
 
 	d = 0;
+	rtnl_lock();
+
+	cb->seq = macsec_generation;
+
 	for_each_netdev(net, dev) {
 		struct macsec_secy *secy;
 
@@ -2467,6 +2476,7 @@ next:
 	}
 
 done:
+	rtnl_unlock();
 	cb->args[0] = d;
 	return skb->len;
 }
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
 	struct net_device *real_dev = macsec->real_dev;
 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
+	macsec_generation++;
+
 	unregister_netdevice_queue(dev, head);
 	list_del_rcu(&macsec->secys);
-	if (list_empty(&rxd->secys))
+	if (list_empty(&rxd->secys)) {
 		netdev_rx_handler_unregister(real_dev);
+		kfree(rxd);
+	}
 
 	macsec_del_dev(macsec);
 }
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
 
 		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
 						 rxd);
-		if (err < 0)
+		if (err < 0) {
+			kfree(rxd);
 			return err;
+		}
 	}
 
 	list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 	if (err < 0)
 		goto del_dev;
 
+	macsec_generation++;
+
 	dev_hold(real_dev);
 
 	return 0;
@@ -3079,7 +3097,7 @@ unregister:
 
 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 {
-	u64 csid = DEFAULT_CIPHER_ID;
+	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
 	u8 icv_len = DEFAULT_ICV_LEN;
 	int flag;
 	bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 
 	switch (csid) {
-	case DEFAULT_CIPHER_ID:
-	case DEFAULT_CIPHER_ALT:
+	case MACSEC_DEFAULT_CIPHER_ID:
+	case MACSEC_DEFAULT_CIPHER_ALT:
 		if (icv_len < MACSEC_MIN_ICV_LEN ||
 		    icv_len > MACSEC_MAX_ICV_LEN)
 			return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
 		return -EINVAL;
 
-	if ((data[IFLA_MACSEC_PROTECT] &&
-	     nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
 	    !data[IFLA_MACSEC_WINDOW])
 		return -EINVAL;
 
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
 
 	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
-	    nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+	    nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
+			MACSEC_DEFAULT_CIPHER_ID) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
 	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
 	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 {
 	struct flowi6 fl;
 
+	memset(&fl, 0, sizeof(fl));
 	if (saddr)
 		memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
 	if (daddr)
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -519,8 +519,9 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-	MLX5_INTERFACE_STATE_DOWN,
-	MLX5_INTERFACE_STATE_UP,
+	MLX5_INTERFACE_STATE_DOWN = BIT(0),
+	MLX5_INTERFACE_STATE_UP = BIT(1),
+	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
 	enum mlx5_device_state	state;
 	/* sync interface state */
 	struct mutex		intf_state_mutex;
-	enum mlx5_interface_state interface_state;
+	unsigned long		intf_state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
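The driver.h hunks above are what make the new shutdown callback possible: a single enum value can hold only one of UP or DOWN, while SHUTDOWN has to be observable alongside them, so the field becomes an unsigned long bitmask driven by set_bit()/clear_bit()/test_bit(). A userspace sketch of the idea, with C11 atomics standing in for the kernel bit ops (illustration only):

#include <stdatomic.h>
#include <stdio.h>

enum { STATE_DOWN, STATE_UP, STATE_SHUTDOWN };	/* bit numbers */

static atomic_ulong intf_state = 1UL << STATE_DOWN;

static void set_state(int nr)   { atomic_fetch_or(&intf_state, 1UL << nr); }
static void clear_state(int nr) { atomic_fetch_and(&intf_state, ~(1UL << nr)); }
static int  test_state(int nr)  { return (atomic_load(&intf_state) >> nr) & 1; }

int main(void)
{
	clear_state(STATE_DOWN);
	set_state(STATE_UP);
	set_state(STATE_SHUTDOWN);	/* can overlap with UP */

	if (test_state(STATE_UP) && test_state(STATE_SHUTDOWN))
		printf("detach netdev instead of unregistering\n");
	return 0;
}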
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 				 enum mlx5_port_status *status);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
 			      u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 				     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
 				      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
 					   u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
 	struct net_device *orig_dev;
 	enum switchdev_attr_id id;
 	u32 flags;
+	void *complete_priv;
+	void (*complete)(struct net_device *dev, int err, void *priv);
 	union {
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
 	struct net_device *orig_dev;
 	enum switchdev_obj_id id;
 	u32 flags;
+	void *complete_priv;
+	void (*complete)(struct net_device *dev, int err, void *priv);
 };
 
 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,8 @@
 
 #define MACSEC_MAX_KEY_LEN 128
 
-#define DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2030,7 +2030,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 			if (IS_ERR(map)) {
 				verbose("fd %d is not pointing to valid bpf_map\n",
 					insn->imm);
-				fdput(f);
 				return PTR_ERR(map);
 			}
 
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
 		e->flags |= MDB_FLAGS_OFFLOAD;
 }
 
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+	memset(ip, 0, sizeof(struct br_ip));
+	ip->vid = entry->vid;
+	ip->proto = entry->addr.proto;
+	if (ip->proto == htons(ETH_P_IP))
+		ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+	else
+		ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev)
 {
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
 	       + nla_total_size(sizeof(struct br_mdb_entry));
 }
 
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-			    int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+	struct net_bridge_port *port;
+	struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
+{
+	struct br_mdb_complete_info *data = priv;
+	struct net_bridge_port_group __rcu **pp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct net_bridge_port *port = data->port;
+	struct net_bridge *br = port->br;
+
+	if (err)
+		goto err;
+
+	spin_lock_bh(&br->multicast_lock);
+	mdb = mlock_dereference(br->mdb, br);
+	mp = br_mdb_ip_get(mdb, &data->ip);
+	if (!mp)
+		goto out;
+	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
+		if (p->port != port)
+			continue;
+		p->flags |= MDB_PG_FLAGS_OFFLOAD;
+	}
+out:
+	spin_unlock_bh(&br->multicast_lock);
+err:
+	kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+			    struct br_mdb_entry *entry, int type)
 {
+	struct br_mdb_complete_info *complete_info;
 	struct switchdev_obj_port_mdb mdb = {
 		.obj = {
 			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
 
 	mdb.obj.orig_dev = port_dev;
 	if (port_dev && type == RTM_NEWMDB) {
-		err = switchdev_port_obj_add(port_dev, &mdb.obj);
-		if (!err && pg)
-			pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+		if (complete_info) {
+			complete_info->port = p;
+			__mdb_entry_to_br_ip(entry, &complete_info->ip);
+			mdb.obj.complete_priv = complete_info;
+			mdb.obj.complete = br_mdb_complete;
+			switchdev_port_obj_add(port_dev, &mdb.obj);
+		}
 	} else if (port_dev && type == RTM_DELMDB) {
 		switchdev_port_obj_del(port_dev, &mdb.obj);
 	}
@@ -291,21 +345,21 @@ errout:
 	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags)
 {
 	struct br_mdb_entry entry;
 
 	memset(&entry, 0, sizeof(entry));
-	entry.ifindex = pg->port->dev->ifindex;
-	entry.addr.proto = pg->addr.proto;
-	entry.addr.u.ip4 = pg->addr.u.ip4;
+	entry.ifindex = port->dev->ifindex;
+	entry.addr.proto = group->proto;
+	entry.addr.u.ip4 = group->u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-	entry.addr.u.ip6 = pg->addr.u.ip6;
+	entry.addr.u.ip6 = group->u.ip6;
 #endif
-	entry.vid = pg->addr.vid;
-	__mdb_entry_fill_flags(&entry, pg->flags);
-	__br_mdb_notify(dev, &entry, type, pg);
+	entry.vid = group->vid;
+	__mdb_entry_fill_flags(&entry, flags);
+	__br_mdb_notify(dev, port, &entry, type);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-			    struct br_ip *group, unsigned char state,
-			    struct net_bridge_port_group **pg)
+			    struct br_ip *group, unsigned char state)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 	if (unlikely(!p))
 		return -ENOMEM;
 	rcu_assign_pointer(*pp, p);
-	*pg = p;
 	if (state == MDB_TEMPORARY)
 		mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-			struct br_mdb_entry *entry,
-			struct net_bridge_port_group **pg)
+			struct br_mdb_entry *entry)
 {
 	struct br_ip ip;
 	struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
-	ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+	ret = br_mdb_add_group(br, p, &ip, entry->state);
 	spin_unlock_bh(&br->multicast_lock);
 	return ret;
 }
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	struct net_bridge_port_group *pg;
 	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
 		list_for_each_entry(v, &vg->vlan_list, vlist) {
 			entry->vid = v->vid;
-			err = __br_mdb_add(net, br, entry, &pg);
+			err = __br_mdb_add(net, br, entry);
 			if (err)
 				break;
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 		}
 	} else {
-		err = __br_mdb_add(net, br, entry, &pg);
+		err = __br_mdb_add(net, br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 	}
 
 	return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
 	if (!netif_running(br->dev) || br->multicast_disabled)
 		return -EINVAL;
 
-	memset(&ip, 0, sizeof(ip));
-	ip.vid = entry->vid;
-	ip.proto = entry->addr.proto;
-	if (ip.proto == htons(ETH_P_IP))
-		ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-	else
-		ip.u.ip6 = entry->addr.u.ip6;
-#endif
+	__mdb_entry_to_br_ip(entry, &ip);
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 			entry->vid = v->vid;
 			err = __br_mdb_del(br, entry);
 			if (!err)
-				__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 		}
 	} else {
 		err = __br_mdb_del(br, entry);
 		if (!err)
-			__br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
 	}
 
 	return err;
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
-		br_mdb_notify(br->dev, p, RTM_DELMDB);
+		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+			      p->flags);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
-	br_mdb_notify(br->dev, p, RTM_NEWMDB);
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 found:
 	mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
-		br_mdb_notify(br->dev, p, RTM_DELMDB);
+		br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+			      p->flags);
 
 		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
 			    unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-		   int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+		   struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 		   int type);
 
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
 		if (!prim) {
-			pr_warn("%s: bug: prim == NULL\n", __func__);
+			/* if the device has been deleted, we don't perform
+			 * address promotion
+			 */
+			if (!in_dev->dead)
+				pr_warn("%s: bug: prim == NULL\n", __func__);
 			return;
 		}
 		if (iprim && iprim != prim) {
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-	if (ifp->rt) {
-		u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-		if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-			ip6_del_rt(ifp->rt);
-			ifp->rt = NULL;
-		}
-	}
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-				 struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
 				struct inet6_ifaddr *ifp)
 {
-	l3mdev_check_host_rt(idev, ifp);
-
 	if (!ifp->rt) {
 		struct rt6_info *rt;
 
@@ -3304,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			break;
 
 		if (event == NETDEV_UP) {
+			/* restore routes for permanent addresses */
+			addrconf_permanent_addr(dev);
+
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is not ready yet. */
 				pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3337,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			run_pending = 1;
 		}
 
-		/* restore routes for permanent addresses */
-		addrconf_permanent_addr(dev);
-
 		switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
 		case ARPHRD_SIT:
@@ -3556,6 +3530,8 @@ restart:
 
 	INIT_LIST_HEAD(&del_list);
 	list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+		struct rt6_info *rt = NULL;
+
 		addrconf_del_dad_work(ifa);
 
 		write_unlock_bh(&idev->lock);
@@ -3568,6 +3544,9 @@ restart:
 			ifa->state = 0;
 			if (!(ifa->flags & IFA_F_NODAD))
 				ifa->flags |= IFA_F_TENTATIVE;
+
+			rt = ifa->rt;
+			ifa->rt = NULL;
 		} else {
 			state = ifa->state;
 			ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3578,6 +3557,9 @@ restart:
 
 		spin_unlock_bh(&ifa->lock);
 
+		if (rt)
+			ip6_del_rt(rt);
+
 		if (state != INET6_IFADDR_STATE_DEAD) {
 			__ipv6_ifa_notify(RTM_DELADDR, ifa);
 			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5343,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 			if (rt)
 				ip6_del_rt(rt);
 		}
-		dst_hold(&ifp->rt->dst);
-
-		ip6_del_rt(ifp->rt);
-
+		if (ifp->rt) {
+			dst_hold(&ifp->rt->dst);
+			ip6_del_rt(ifp->rt);
+		}
 		rt_genid_bump_ipv6(net);
 		break;
 	}
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 			   err, attr->id);
+	if (attr->complete)
+		attr->complete(dev, err, attr->complete_priv);
 }
 
 static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
 	if (err && err != -EOPNOTSUPP)
 		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
 			   err, obj->id);
+	if (obj->complete)
+		obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_del_defer(struct net_device *dev,