Merge tag 'mlx5-updates-2023-06-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-06-21

mlx5 driver minor cleanup and fixes to net-next

* tag 'mlx5-updates-2023-06-21' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Remove pointless vport lookup from mlx5_esw_check_port_type()
  net/mlx5: Remove redundant check from mlx5_esw_query_vport_vhca_id()
  net/mlx5: Remove redundant is_mdev_switchdev_mode() check from is_ib_rep_supported()
  net/mlx5: Remove redundant MLX5_ESWITCH_MANAGER() check from is_ib_rep_supported()
  net/mlx5e: E-Switch, Fix shared fdb error flow
  net/mlx5e: Remove redundant comment
  net/mlx5e: E-Switch, Pass other_vport flag if vport is not 0
  net/mlx5e: E-Switch, Use xarray for devcom paired device index
  net/mlx5e: E-Switch, Add peer fdb miss rules for vport manager or ecpf
  net/mlx5e: Use vhca_id for device index in vport rx rules
  net/mlx5: Lag, Remove duplicate code checking lag is supported
  net/mlx5: Fix error code in mlx5_is_reset_now_capable()
  net/mlx5: Fix reserved at offset in hca_cap register
  net/mlx5: Fix SFs kernel documentation error
  net/mlx5: Fix UAF in mlx5_eswitch_cleanup()
====================

Link: https://lore.kernel.org/r/20230623192907.39033-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b545a13ca9
Jakub Kicinski, 2023-06-24 15:48:04 -07:00
10 changed files with 84 additions and 68 deletions

diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst

@@ -51,19 +51,21 @@ This will allow user to configure the SF before the SF have been fully probed,
 which will save time.
 
 Usage example:
-Create SF:
-$ devlink port add pci/0000:08:00.0 flavour pcisf pfnum 0 sfnum 11
-$ devlink port function set pci/0000:08:00.0/32768 \
-               hw_addr 00:00:00:00:00:11 state active
-
-Enable ETH auxiliary device:
-$ devlink dev param set auxiliary/mlx5_core.sf.1 \
-              name enable_eth value true cmode driverinit
-
-Now, in order to fully probe the SF, use devlink reload:
-$ devlink dev reload auxiliary/mlx5_core.sf.1
-
-mlx5 supports ETH,rdma and vdpa (vnet) auxiliary devices devlink params (see :ref:`Documentation/networking/devlink/devlink-params.rst`)
+
+- Create SF::
+
+    $ devlink port add pci/0000:08:00.0 flavour pcisf pfnum 0 sfnum 11
+    $ devlink port function set pci/0000:08:00.0/32768 hw_addr 00:00:00:00:00:11 state active
+
+- Enable ETH auxiliary device::
+
+    $ devlink dev param set auxiliary/mlx5_core.sf.1 name enable_eth value true cmode driverinit
+
+- Now, in order to fully probe the SF, use devlink reload::
+
+    $ devlink dev reload auxiliary/mlx5_core.sf.1
+
+mlx5 supports ETH,rdma and vdpa (vnet) auxiliary devices devlink params (see :ref:`Documentation/networking/devlink/devlink-params.rst <devlink_params_generic>`).
 
 mlx5 supports subfunction management using devlink port (see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface.

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c

@@ -151,12 +151,6 @@ static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
 	if (!is_eth_rep_supported(dev))
 		return false;
 
-	if (!MLX5_ESWITCH_MANAGER(dev))
-		return false;
-
-	if (!is_mdev_switchdev_mode(dev))
-		return false;
-
 	if (mlx5_core_mp_enabled(dev))
 		return false;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -408,7 +408,7 @@ static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5
 
 	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
 					peer_esw, tmp) {
-		int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
+		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 		struct mlx5e_rep_sq_peer *sq_peer;
 		int err;
 
@@ -1581,7 +1581,7 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
 					 struct mlx5_eswitch *peer_esw)
 {
-	int i = mlx5_get_dev_index(peer_esw->dev);
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
 
@@ -1603,7 +1603,7 @@ static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep,
 				      struct mlx5_eswitch *peer_esw)
 {
-	int i = mlx5_get_dev_index(peer_esw->dev);
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -1751,16 +1751,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
-	err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
-				   ARRAY_SIZE(mlx5_eswitch_params));
-	if (err)
-		return err;
-
-	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
-	if (!esw) {
-		err = -ENOMEM;
-		goto unregister_param;
-	}
+	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
+	if (!esw)
+		return -ENOMEM;
+
+	err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
+				   ARRAY_SIZE(mlx5_eswitch_params));
+	if (err)
+		goto free_esw;
 
 	esw->dev = dev;
 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
@@ -1821,10 +1819,10 @@ abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
 	debugfs_remove_recursive(esw->debugfs_root);
-	kfree(esw);
-unregister_param:
 	devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
 			       ARRAY_SIZE(mlx5_eswitch_params));
+free_esw:
+	kfree(esw);
 	return err;
 }
 
@@ -1848,9 +1846,9 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	esw_offloads_cleanup(esw);
 	mlx5_esw_vports_cleanup(esw);
 	debugfs_remove_recursive(esw->debugfs_root);
-	kfree(esw);
 	devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
 			       ARRAY_SIZE(mlx5_eswitch_params));
+	kfree(esw);
 }
 
 /* Vport Administration */
@@ -1910,12 +1908,6 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 
 static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
 {
-	struct mlx5_vport *vport;
-
-	vport = mlx5_eswitch_get_vport(esw, vport_num);
-	if (IS_ERR(vport))
-		return false;
-
 	return xa_get_mark(&esw->vports, vport_num, mark);
 }
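Note: the UAF fix above is purely an ordering rule: any teardown step that still dereferences esw (here devl_params_unregister(), which reaches through esw->dev) must run before kfree(esw), so cleanup mirrors init in reverse. A minimal userspace analogue, with illustrative names rather than the driver's; in the wrong order, KASAN or ASan would flag the read of freed memory:

#include <stdio.h>
#include <stdlib.h>

struct esw { int dev_id; };

/* Stands in for devl_params_unregister(): it still reads the object. */
static void params_unregister(struct esw *esw)
{
        printf("unregister params for dev %d\n", esw->dev_id);
}

int main(void)
{
        struct esw *esw = calloc(1, sizeof(*esw));

        if (!esw)
                return 1;
        esw->dev_id = 42;

        /* Buggy order (the old cleanup): free(esw); params_unregister(esw);
         * reads freed memory. The fixed order unwinds before freeing: */
        params_unregister(esw);
        free(esw);
        return 0;
}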

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -353,7 +353,7 @@ struct mlx5_eswitch {
 		u32             large_group_num;
 	} params;
 	struct blocking_notifier_head n_head;
-	bool paired[MLX5_MAX_PORTS];
+	struct xarray paired;
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
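Note: the bool paired[MLX5_MAX_PORTS] array only worked while the pairing key (the peer's device index) was bounded by MLX5_MAX_PORTS; keying by the peer's vhca_id instead needs a sparse structure, hence the xarray. A kernel-context sketch of the resulting lifecycle, using the real xarray API (xa_init/xa_insert/xa_load/xa_erase/xa_destroy) but hypothetical demo_* names:

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_esw {
        struct xarray paired;   /* peer vhca_id -> peer eswitch */
};

static void demo_init(struct demo_esw *esw)
{
        xa_init(&esw->paired);
}

static int demo_pair(struct demo_esw *esw, struct demo_esw *peer, u16 peer_vhca_id)
{
        /* xa_insert() fails with -EBUSY if this vhca_id is already paired */
        return xa_insert(&esw->paired, peer_vhca_id, peer, GFP_KERNEL);
}

static bool demo_is_paired(struct demo_esw *esw, u16 peer_vhca_id)
{
        return xa_load(&esw->paired, peer_vhca_id) != NULL;
}

static void demo_unpair(struct demo_esw *esw, u16 peer_vhca_id)
{
        xa_erase(&esw->paired, peer_vhca_id);
}

static void demo_cleanup(struct demo_esw *esw)
{
        xa_destroy(&esw->paired);
}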

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -1069,6 +1069,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	void *misc;
 	int err;
 
+	if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return 0;
+
 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	if (!spec)
 		return -ENOMEM;
@@ -1177,11 +1180,14 @@ alloc_flows_err:
 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 					struct mlx5_core_dev *peer_dev)
 {
+	u16 peer_index = mlx5_get_dev_index(peer_dev);
 	struct mlx5_flow_handle **flows;
 	struct mlx5_vport *vport;
 	unsigned long i;
 
-	flows = esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)];
+	flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
+	if (!flows)
+		return;
 
 	if (mlx5_core_ec_sriov_enabled(esw->dev)) {
 		mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
@@ -1206,7 +1212,9 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
 		mlx5_del_flow_rules(flows[vport->index]);
 	}
+
 	kvfree(flows);
+	esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -1896,7 +1904,6 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
 	if (!flow_group_in)
 		return -ENOMEM;
 
-	/* create vport rx group */
 	mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
 
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -2543,13 +2550,13 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
 	return err;
 }
 
-static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress_ns,
+static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
+					      struct mlx5_flow_namespace *egress_ns,
 					      struct mlx5_vport *vport, size_t count)
 {
 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_table_attr ft_attr = {
 		.max_fte = count, .prio = 0, .level = 0,
-		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
 	};
 	struct mlx5_flow_table *acl;
 	struct mlx5_flow_group *g;
@@ -2564,6 +2571,9 @@ static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress
 	if (!flow_group_in)
 		return -ENOMEM;
 
+	if (vport->vport || mlx5_core_is_ecpf(esw->dev))
+		ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+
 	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
 	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
@@ -2608,8 +2618,12 @@ out:
 
 static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
 {
+	if (!xa_empty(&vport->egress.offloads.bounce_rules))
+		return;
 	mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
+	vport->egress.offloads.bounce_grp = NULL;
 	mlx5_destroy_flow_table(vport->egress.acl);
+	vport->egress.acl = NULL;
 }
 
 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
@@ -2634,7 +2648,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
 	if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
 		return 0;
 
-	err = esw_master_egress_create_resources(egress_ns, vport, count);
+	err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
 	if (err)
 		return err;
@@ -2799,15 +2813,21 @@ static int mlx5_esw_offloads_devcom_event(int event,
 	struct mlx5_eswitch *esw = my_data;
 	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 	struct mlx5_eswitch *peer_esw = event_data;
+	u16 esw_i, peer_esw_i;
+	bool esw_paired;
 	int err;
 
+	peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+	esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
+	esw_paired = !!xa_load(&esw->paired, peer_esw_i);
+
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
 
-		if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+		if (esw_paired)
 			break;
 
 		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
@@ -2821,23 +2841,29 @@ static int mlx5_esw_offloads_devcom_event(int event,
 		if (err)
 			goto err_pair;
 
-		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
-		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
+		err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
+		if (err)
+			goto err_xa;
+
+		err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
+		if (err)
+			goto err_peer_xa;
+
 		esw->num_peers++;
 		peer_esw->num_peers++;
 		mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
 		break;
 	case ESW_OFFLOADS_DEVCOM_UNPAIR:
-		if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+		if (!esw_paired)
 			break;
 
 		peer_esw->num_peers--;
 		esw->num_peers--;
 		if (!esw->num_peers && !peer_esw->num_peers)
 			mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
-		esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
-		peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
+		xa_erase(&peer_esw->paired, esw_i);
+		xa_erase(&esw->paired, peer_esw_i);
 		mlx5_esw_offloads_unpair(peer_esw, esw);
 		mlx5_esw_offloads_unpair(esw, peer_esw);
 		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2846,6 +2872,10 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	return 0;
 
+err_peer_xa:
+	xa_erase(&esw->paired, peer_esw_i);
+err_xa:
+	mlx5_esw_offloads_unpair(peer_esw, esw);
 err_pair:
 	mlx5_esw_offloads_unpair(esw, peer_esw);
 err_peer:
@@ -2868,9 +2898,10 @@ void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
-	if (!mlx5_is_lag_supported(esw->dev))
+	if (!mlx5_lag_is_supported(esw->dev))
 		return;
 
+	xa_init(&esw->paired);
 	mlx5_devcom_register_component(devcom,
 				       MLX5_DEVCOM_ESW_OFFLOADS,
 				       mlx5_esw_offloads_devcom_event,
@@ -2890,7 +2921,7 @@ void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
-	if (!mlx5_is_lag_supported(esw->dev))
+	if (!mlx5_lag_is_supported(esw->dev))
 		return;
 
 	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
@@ -2898,6 +2929,7 @@ void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
 	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	xa_destroy(&esw->paired);
 }
 
 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
@@ -3929,9 +3961,6 @@ static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num,
 	int err;
 
 	*vhca_id = 0;
-	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
-	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
-		return -EPERM;
 
 	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
 	if (!query_ctx)
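Note: the new err_xa/err_peer_xa labels extend the usual kernel goto-unwind ladder: each step that can fail jumps to a label that releases exactly the steps already completed, in reverse order. A condensed, self-contained sketch of just the two-sided pairing insert (hypothetical demo_* names, real xarray calls; the same demo type as the sketch above):

#include <linux/types.h>
#include <linux/xarray.h>

struct demo_esw {
        struct xarray paired;   /* peer vhca_id -> peer eswitch */
};

/* Both sides record each other; on failure, undo only what succeeded. */
static int demo_pair_event(struct demo_esw *esw, u16 esw_i,
                           struct demo_esw *peer, u16 peer_i)
{
        int err;

        err = xa_insert(&esw->paired, peer_i, peer, GFP_KERNEL);
        if (err)
                return err;     /* nothing to undo yet */

        err = xa_insert(&peer->paired, esw_i, esw, GFP_KERNEL);
        if (err)
                goto err_peer_xa;

        return 0;

err_peer_xa:
        xa_erase(&esw->paired, peer_i);
        return err;
}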

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c

@@ -327,7 +327,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
 
 	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
 		mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
-		return -EOPNOTSUPP;
+		return false;
 	}
 
 	err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
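Note: the one-liner above fixes a classic bool/errno mixup: mlx5_is_reset_now_capable() returns bool, and in C any nonzero value converts to true, so return -EOPNOTSUPP reported "capable" on the exact path meant to deny it. A standalone demonstration (plain C; the errno value is assumed, as on Linux):

#include <stdbool.h>
#include <stdio.h>

#define EOPNOTSUPP 95   /* assumed Linux errno value, for illustration */

static bool buggy_capable(void)
{
        return -EOPNOTSUPP;     /* nonzero -> converts to true */
}

static bool fixed_capable(void)
{
        return false;
}

int main(void)
{
        /* Prints "buggy=1 fixed=0": the buggy version claims capability. */
        printf("buggy=%d fixed=%d\n", buggy_capable(), fixed_capable());
        return 0;
}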

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c

@@ -1268,14 +1268,21 @@ recheck:
 	mlx5_ldev_put(ldev);
 }
 
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
+{
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+		return false;
+	return true;
+}
+
 void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
 {
 	int err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
-	     MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
+	if (!mlx5_lag_is_supported(dev))
 		return;
 
 recheck:

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h

@@ -74,15 +74,7 @@ struct mlx5_lag {
 	struct lag_mpesw	  lag_mpesw;
 };
 
-static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
-{
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
-	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
-		return false;
-	return true;
-}
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev);
 
 static inline struct mlx5_lag *
 mlx5_lag_dev(struct mlx5_core_dev *dev)

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h

@@ -1710,9 +1710,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         regexp_params[0x1];
 	u8         uar_sz[0x6];
 	u8         port_selection_cap[0x1];
-	u8         reserved_at_248[0x1];
+	u8         reserved_at_251[0x1];
 	u8         umem_uid_0[0x1];
-	u8         reserved_at_250[0x5];
+	u8         reserved_at_253[0x5];
 	u8         log_pg_sz[0x8];
 
 	u8         bf[0x1];
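Note: mlx5_ifc.h encodes each reserved field's hex bit offset in its name, so the rename above is bookkeeping that makes the names match the actual layout again: with port_selection_cap at bit 0x250, the next free bit is 0x251, umem_uid_0 lands at 0x252, and the 5 reserved bits at 0x253 end exactly where log_pg_sz begins at 0x258. A compile-time restatement of that arithmetic (standalone sketch; offsets taken from the corrected names):

#include <assert.h>

enum {
        PORT_SELECTION_CAP_BIT = 0x250, /* 1 bit */
        RESERVED_AT_251        = 0x251, /* 1 bit, was misnamed _248 */
        UMEM_UID_0_BIT         = 0x252, /* 1 bit */
        RESERVED_AT_253        = 0x253, /* 5 bits, was misnamed _250 */
        LOG_PG_SZ_BIT          = 0x258, /* 8 bits */
};

/* Consecutive fields must tile the bit space with no gaps or overlaps. */
static_assert(PORT_SELECTION_CAP_BIT + 0x1 == RESERVED_AT_251, "reserved bit offset");
static_assert(RESERVED_AT_251 + 0x1 == UMEM_UID_0_BIT, "umem_uid_0 offset");
static_assert(UMEM_UID_0_BIT + 0x1 == RESERVED_AT_253, "reserved run offset");
static_assert(RESERVED_AT_253 + 0x5 == LOG_PG_SZ_BIT, "log_pg_sz offset");

int main(void) { return 0; }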