mlx5/core: E-Switch, Create ACL FT for eswitch manager in switchdev mode

An ACL flow table is required in switchdev mode when metadata is
enabled. The driver creates such a table when loading each vport;
however, not every vport is loaded in switchdev mode, e.g., the ECPF
when it is the eswitch manager. In this case, the ACL flow table is
still needed.
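
For reference, a simplified sketch of that per-vport path (modeled on
esw_vport_setup_acl() in eswitch.c; reproduced from memory, not a
verbatim quote): ACL tables are created only while a vport is being
loaded, so a manager vport that is never loaded misses them.

        /* Sketch: per-vport ACL setup runs only as part of vport load,
         * so skipping the load skips the ACL flow table as well.
         */
        static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
                                       struct mlx5_vport *vport)
        {
                if (esw->mode == MLX5_ESWITCH_LEGACY)
                        return esw_legacy_vport_acl_setup(esw, vport);
                else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
                        return esw_vport_create_offloads_acl_tables(esw, vport);

                return 0;
        }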

To keep this modular, create the ACL flow table for the eswitch
manager by default, and skip these operations when loading the
manager vport itself.
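
The manager vport here is resolved through esw->manager_vport; the
mlx5 sources provide a small helper for this check, shown below as a
reference sketch (simplified, from memory):

        /* The manager vport is the PF on regular HCAs, or the ECPF on
         * embedded-CPU (BlueField) parts that own the eswitch.
         */
        static inline bool
        mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
        {
                return esw->manager_vport == vport_num;
        }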

Also, there is no need to load the eswitch manager vport in switchdev
mode, which means it does not need to be loaded on regular ConnectX
HCAs where the PF is the eswitch manager. This avoids creating a
duplicate ACL flow table for the host PF vport.
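
Concretely, loading and unloading of the PF vport is now gated on the
condition below (taken from the diff that follows); on ECPF-managed
devices the host PF is a plain function and must still be loaded:

        /* PF vport load is still needed when an external ECPF manages
         * the eswitch, or in legacy mode; otherwise the manager's ACL
         * FT is created by default.
         */
        pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
                    esw->mode == MLX5_ESWITCH_LEGACY;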

Fixes: 29bcb6e4fe ("net/mlx5e: E-Switch, Use metadata for vport matching in send-to-vport rules")
Fixes: eb8e9fae0a ("mlx5/core: E-Switch, Allocate ECPF vport if it's an eswitch manager")
Fixes: 5019833d66 ("net/mlx5: E-switch, Introduce helper function to enable/disable vports")
Signed-off-by: Bodong Wang <bodong@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1276,12 +1276,19 @@ int
 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                  enum mlx5_eswitch_vport_event enabled_events)
 {
+        bool pf_needed;
         int ret;
 
+        pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+                    esw->mode == MLX5_ESWITCH_LEGACY;
+
         /* Enable PF vport */
-        ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
-        if (ret)
-                return ret;
+        if (pf_needed) {
+                ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
+                                                    enabled_events);
+                if (ret)
+                        return ret;
+        }
 
         /* Enable external host PF HCA */
         ret = host_pf_enable_hca(esw->dev);
@@ -1317,7 +1324,8 @@ ec_vf_err:
 ecpf_err:
         host_pf_disable_hca(esw->dev);
 pf_hca_err:
-        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+        if (pf_needed)
+                mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
         return ret;
 }
 
@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
         }
 
         host_pf_disable_hca(esw->dev);
-        mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+
+        if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+            esw->mode == MLX5_ESWITCH_LEGACY)
+                mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }
 
 static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
         esw_acl_ingress_ofld_cleanup(esw, vport);
 }
 
-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
-        struct mlx5_vport *vport;
+        struct mlx5_vport *uplink, *manager;
+        int ret;
 
-        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-        if (IS_ERR(vport))
-                return PTR_ERR(vport);
+        uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+        if (IS_ERR(uplink))
+                return PTR_ERR(uplink);
 
-        return esw_vport_create_offloads_acl_tables(esw, vport);
+        ret = esw_vport_create_offloads_acl_tables(esw, uplink);
+        if (ret)
+                return ret;
+
+        manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+        if (IS_ERR(manager)) {
+                ret = PTR_ERR(manager);
+                goto err_manager;
+        }
+
+        ret = esw_vport_create_offloads_acl_tables(esw, manager);
+        if (ret)
+                goto err_manager;
+
+        return 0;
+
+err_manager:
+        esw_vport_destroy_offloads_acl_tables(esw, uplink);
+        return ret;
 }
 
-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
         struct mlx5_vport *vport;
 
-        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
-        if (IS_ERR(vport))
-                return;
+        vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+        if (!IS_ERR(vport))
+                esw_vport_destroy_offloads_acl_tables(esw, vport);
 
-        esw_vport_destroy_offloads_acl_tables(esw, vport);
+        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+        if (!IS_ERR(vport))
+                esw_vport_destroy_offloads_acl_tables(esw, vport);
 }
 
 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
         }
         esw->fdb_table.offloads.indir = indir;
 
-        err = esw_create_uplink_offloads_acl_tables(esw);
+        err = esw_create_offloads_acl_tables(esw);
         if (err)
                 goto create_acl_err;
 
@@ -3321,7 +3342,7 @@ create_fdb_err:
 create_restore_err:
         esw_destroy_offloads_table(esw);
 create_offloads_err:
-        esw_destroy_uplink_offloads_acl_tables(esw);
+        esw_destroy_offloads_acl_tables(esw);
 create_acl_err:
         mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
 create_indir_err:
@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
         esw_destroy_offloads_fdb_tables(esw);
         esw_destroy_restore_table(esw);
         esw_destroy_offloads_table(esw);
-        esw_destroy_uplink_offloads_acl_tables(esw);
+        esw_destroy_offloads_acl_tables(esw);
         mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
         mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 }