net/mlx5: Implement vports admin state backup/restore

Save the user configuration in the vport struct.
Restore the saved configuration when the vport is enabled.

Signed-off-by: Mohamad Haj Yahia <mohamad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Mohamad Haj Yahia 2016-09-09 17:35:24 +03:00 committed by David S. Miller
parent c2d6e31a00
commit 1ab2068a4c
2 changed files with 124 additions and 141 deletions
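
In short, the patch moves each vport's administrative settings (MAC, node GUID, VLAN/QoS, link state, spoofchk, trust) into a software struct that the configuration handlers update, and replays that struct into firmware whenever the vport is enabled, rather than querying the device for its previous state. A minimal sketch of the pattern; the names here (vport_info, apply_vport_conf, the hw_set_* stubs) are hypothetical stand-ins, not the driver's actual API, which is in the diff below:

#include <stdbool.h>

/* Admin state cached in software; survives vport disable/enable. */
struct vport_info {
	unsigned char mac[6];
	unsigned short vlan;
	unsigned char qos;
	int link_state;
	bool spoofchk;
	bool trusted;
};

struct vport {
	struct vport_info info;	/* the set_vf_* handlers update this */
	bool enabled;
};

/* Stand-ins for the firmware commands. */
static void hw_set_link_state(const struct vport *vp) { (void)vp; }
static void hw_set_mac(const struct vport *vp)        { (void)vp; }
static void hw_set_vlan(const struct vport *vp)       { (void)vp; }

/* On enable, replay the cached admin state into hardware instead of
 * trusting whatever state the device was left in. */
static void apply_vport_conf(struct vport *vp)
{
	hw_set_link_state(vp);
	hw_set_mac(vp);
	hw_set_vlan(vp);
}

static void enable_vport(struct vport *vp)
{
	apply_vport_conf(vp);	/* restore before marking enabled */
	vp->enabled = true;
}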

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@@ -116,57 +116,6 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 }
 
 /* E-Switch vport context HW commands */
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
-				       u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
-
-	MLX5_SET(query_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
-	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
-				 u16 *vlan, u8 *qos)
-{
-	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
-	int err;
-	bool cvlan_strip;
-	bool cvlan_insert;
-
-	*vlan = 0;
-	*qos = 0;
-
-	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
-	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
-
-	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
-	if (err)
-		goto out;
-
-	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
-			       esw_vport_context.vport_cvlan_strip);
-	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
-				esw_vport_context.vport_cvlan_insert);
-
-	if (cvlan_strip || cvlan_insert) {
-		*vlan = MLX5_GET(query_esw_vport_context_out, out,
-				 esw_vport_context.cvlan_id);
-		*qos = MLX5_GET(query_esw_vport_context_out, out,
-				esw_vport_context.cvlan_pcp);
-	}
-
-	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
-		  vport, *vlan, *qos);
-out:
-	return err;
-}
-
 static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
 					void *in, int inlen)
 {
@@ -921,7 +870,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
 	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
 		  vport_num, promisc_all, promisc_mc);
 
-	if (!vport->trusted || !vport->enabled) {
+	if (!vport->info.trusted || !vport->enabled) {
 		promisc_uc = 0;
 		promisc_mc = 0;
 		promisc_all = 0;
@@ -1257,30 +1206,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
 	struct mlx5_flow_spec *spec;
-	u8 smac[ETH_ALEN];
 	int err = 0;
 	u8 *smac_v;
 
-	if (vport->spoofchk) {
-		err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
-		if (err) {
-			esw_warn(esw->dev,
-				 "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
-				 vport->vport, err);
-			return err;
-		}
-
-		if (!is_valid_ether_addr(smac)) {
-			mlx5_core_warn(esw->dev,
-				       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-				       vport->vport);
-			return -EPERM;
-		}
+	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+		mlx5_core_warn(esw->dev,
+			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+			       vport->vport);
+		return -EPERM;
 	}
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-	if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
 		esw_vport_disable_ingress_acl(esw, vport);
 		return 0;
 	}
@@ -1289,7 +1228,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 
 	esw_debug(esw->dev,
 		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->vlan, vport->qos);
+		  vport->vport, vport->info.vlan, vport->info.qos);
 
 	spec = mlx5_vzalloc(sizeof(*spec));
 	if (!spec) {
@@ -1299,16 +1238,16 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		goto out;
 	}
 
-	if (vport->vlan || vport->qos)
+	if (vport->info.vlan || vport->info.qos)
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
 
-	if (vport->spoofchk) {
+	if (vport->info.spoofchk) {
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
 		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
 		smac_v = MLX5_ADDR_OF(fte_match_param,
 				      spec->match_value,
 				      outer_headers.smac_47_16);
-		ether_addr_copy(smac_v, smac);
+		ether_addr_copy(smac_v, vport->info.mac);
 	}
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -1354,7 +1293,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 
 	esw_vport_cleanup_egress_rules(esw, vport);
 
-	if (!vport->vlan && !vport->qos) {
+	if (!vport->info.vlan && !vport->info.qos) {
 		esw_vport_disable_egress_acl(esw, vport);
 		return 0;
 	}
@@ -1363,7 +1302,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 
 	esw_debug(esw->dev,
 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->vlan, vport->qos);
+		  vport->vport, vport->info.vlan, vport->info.qos);
 
 	spec = mlx5_vzalloc(sizeof(*spec));
 	if (!spec) {
@@ -1377,7 +1316,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
 	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	vport->egress.allowed_vlan =
@@ -1411,6 +1350,41 @@ out:
 	return err;
 }
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+	((u8 *)node_guid)[7] = mac[0];
+	((u8 *)node_guid)[6] = mac[1];
+	((u8 *)node_guid)[5] = mac[2];
+	((u8 *)node_guid)[4] = 0xff;
+	((u8 *)node_guid)[3] = 0xfe;
+	((u8 *)node_guid)[2] = mac[3];
+	((u8 *)node_guid)[1] = mac[4];
+	((u8 *)node_guid)[0] = mac[5];
+}
+
+static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
+				 struct mlx5_vport *vport)
+{
+	int vport_num = vport->vport;
+
+	if (!vport_num)
+		return;
+
+	mlx5_modify_vport_admin_state(esw->dev,
+				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+				      vport_num,
+				      vport->info.link_state);
+	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
+	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
+			       (vport->info.vlan || vport->info.qos));
+
+	/* Only legacy mode needs ACLs */
+	if (esw->mode == SRIOV_LEGACY) {
+		esw_vport_ingress_config(esw, vport);
+		esw_vport_egress_config(esw, vport);
+	}
+}
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 			     int enable_events)
 {
@@ -1421,23 +1395,17 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
 	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
 
-	/* Only VFs need ACLs for VST and spoofchk filtering */
-	if (vport_num && esw->mode == SRIOV_LEGACY) {
-		esw_vport_ingress_config(esw, vport);
-		esw_vport_egress_config(esw, vport);
-	}
-
-	mlx5_modify_vport_admin_state(esw->dev,
-				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-				      vport_num,
-				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);
+	/* Restore old vport configuration */
+	esw_apply_vport_conf(esw, vport);
 
 	/* Sync with current vport context */
 	vport->enabled_events = enable_events;
 	vport->enabled = true;
 
 	/* only PF is trusted by default */
-	vport->trusted = (vport_num) ? false : true;
+	if (!vport_num)
+		vport->info.trusted = true;
+
 	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
@@ -1457,11 +1425,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 	vport->enabled = false;
 	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
-
-	mlx5_modify_vport_admin_state(esw->dev,
-				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-				      vport_num,
-				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 	/* Wait for current already scheduled events to complete */
 	flush_workqueue(esw->work_queue);
 
 	/* Disable events from this vport */
@@ -1473,7 +1436,12 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 	 */
 	esw_vport_change_handle_locked(vport);
 	vport->enabled_events = 0;
+
 	if (vport_num && esw->mode == SRIOV_LEGACY) {
+		mlx5_modify_vport_admin_state(esw->dev,
+					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+					      vport_num,
+					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
 		esw_vport_disable_egress_acl(esw, vport);
 		esw_vport_disable_ingress_acl(esw, vport);
 	}
@@ -1645,6 +1613,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		struct mlx5_vport *vport = &esw->vports[vport_num];
 
 		vport->vport = vport_num;
+		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
 		vport->dev = dev;
 		INIT_WORK(&vport->vport_change_handler,
 			  esw_vport_change_handler);
@@ -1705,18 +1674,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
 	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
-{
-	((u8 *)node_guid)[7] = mac[0];
-	((u8 *)node_guid)[6] = mac[1];
-	((u8 *)node_guid)[5] = mac[2];
-	((u8 *)node_guid)[4] = 0xff;
-	((u8 *)node_guid)[3] = 0xfe;
-	((u8 *)node_guid)[2] = mac[3];
-	((u8 *)node_guid)[1] = mac[4];
-	((u8 *)node_guid)[0] = mac[5];
-}
-
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       int vport, u8 mac[ETH_ALEN])
 {
@@ -1729,13 +1686,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
+	mutex_lock(&esw->state_lock);
 	evport = &esw->vports[vport];
 
-	if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
 		mlx5_core_warn(esw->dev,
 			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
 			       vport);
-		return -EPERM;
+		err = -EPERM;
+		goto unlock;
 	}
 
 	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
@@ -1743,7 +1702,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 		mlx5_core_warn(esw->dev,
 			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
 			       vport, err);
-		return err;
+		goto unlock;
 	}
 
 	node_guid_gen_from_mac(&node_guid, mac);
@@ -1753,9 +1712,12 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
 			       vport, err);
 
-	mutex_lock(&esw->state_lock);
+	ether_addr_copy(evport->info.mac, mac);
+	evport->info.node_guid = node_guid;
 	if (evport->enabled && esw->mode == SRIOV_LEGACY)
 		err = esw_vport_ingress_config(esw, evport);
+
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -1763,22 +1725,38 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
 				 int vport, int link_state)
 {
+	struct mlx5_vport *evport;
+	int err = 0;
+
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
-	return mlx5_modify_vport_admin_state(esw->dev,
-					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-					     vport, link_state);
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+
+	err = mlx5_modify_vport_admin_state(esw->dev,
+					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+					    vport, link_state);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "Failed to set vport %d link state, err = %d",
+			       vport, err);
+		goto unlock;
+	}
+
+	evport->info.link_state = link_state;
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return 0;
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 				  int vport, struct ifla_vf_info *ivi)
 {
 	struct mlx5_vport *evport;
-	u16 vlan;
-	u8 qos;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
@@ -1790,14 +1768,14 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	memset(ivi, 0, sizeof(*ivi));
 	ivi->vf = vport - 1;
 
-	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
-	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
-						      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-						      vport);
-	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
-	ivi->vlan = vlan;
-	ivi->qos = qos;
-	ivi->spoofchk = evport->spoofchk;
+	mutex_lock(&esw->state_lock);
+	ether_addr_copy(ivi->mac, evport->info.mac);
+	ivi->linkstate = evport->info.link_state;
+	ivi->vlan = evport->info.vlan;
+	ivi->qos = evport->info.qos;
+	ivi->spoofchk = evport->info.spoofchk;
+	ivi->trusted = evport->info.trusted;
+	mutex_unlock(&esw->state_lock);
 
 	return 0;
 }
@@ -1817,23 +1795,23 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
 	if (vlan || qos)
 		set = 1;
 
+	mutex_lock(&esw->state_lock);
 	evport = &esw->vports[vport];
 
 	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
 	if (err)
-		return err;
+		goto unlock;
 
-	mutex_lock(&esw->state_lock);
-	evport->vlan = vlan;
-	evport->qos = qos;
+	evport->info.vlan = vlan;
+	evport->info.qos = qos;
 	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
 		err = esw_vport_ingress_config(esw, evport);
 		if (err)
-			goto out;
+			goto unlock;
 		err = esw_vport_egress_config(esw, evport);
 	}
 
-out:
+unlock:
 	mutex_unlock(&esw->state_lock);
 	return err;
 }
@@ -1850,16 +1828,14 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
-	evport = &esw->vports[vport];
-
 	mutex_lock(&esw->state_lock);
-	pschk = evport->spoofchk;
-	evport->spoofchk = spoofchk;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+	evport = &esw->vports[vport];
+	pschk = evport->info.spoofchk;
+	evport->info.spoofchk = spoofchk;
+	if (evport->enabled && esw->mode == SRIOV_LEGACY)
 		err = esw_vport_ingress_config(esw, evport);
-		if (err)
-			evport->spoofchk = pschk;
-	}
+	if (err)
+		evport->info.spoofchk = pschk;
 	mutex_unlock(&esw->state_lock);
 
 	return err;
@@ -1875,10 +1851,9 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
 
-	evport = &esw->vports[vport];
-
 	mutex_lock(&esw->state_lock);
-	evport->trusted = setting;
+	evport = &esw->vports[vport];
+	evport->info.trusted = setting;
 	if (evport->enabled)
 		esw_vport_change_handle_locked(evport);
 	mutex_unlock(&esw->state_lock);
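
A pattern worth noting across the handlers above: esw->state_lock is now taken before the evport pointer is read, and the cached info is updated under the same lock that covers the firmware call, with the old value restored if the call fails. Reduced to a self-contained sketch mirroring the shape of mlx5_eswitch_set_vport_spoofchk; set_vport_attr, the attr field, and hw_apply are hypothetical stand-ins, not driver API:

#include <pthread.h>

struct vport_info { int attr; };
struct vport { struct vport_info info; int enabled; };
struct eswitch { pthread_mutex_t state_lock; struct vport vports[8]; };

/* Stand-in for the firmware call; pretend it succeeded. */
static int hw_apply(struct eswitch *esw, struct vport *evport)
{
	(void)esw; (void)evport;
	return 0;
}

static int set_vport_attr(struct eswitch *esw, int vport, int val)
{
	struct vport *evport;
	int old, err = 0;

	pthread_mutex_lock(&esw->state_lock);
	evport = &esw->vports[vport];
	old = evport->info.attr;
	evport->info.attr = val;		/* update the software copy first */
	if (evport->enabled)			/* only touch HW for live vports */
		err = hw_apply(esw, evport);
	if (err)
		evport->info.attr = old;	/* unwind the cache on failure */
	pthread_mutex_unlock(&esw->state_lock);
	return err;
}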

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@@ -109,6 +109,16 @@ struct vport_egress {
 	struct mlx5_flow_rule  *drop_rule;
 };
 
+struct mlx5_vport_info {
+	u8                      mac[ETH_ALEN];
+	u16                     vlan;
+	u8                      qos;
+	u64                     node_guid;
+	int                     link_state;
+	bool                    spoofchk;
+	bool                    trusted;
+};
+
 struct mlx5_vport {
 	struct mlx5_core_dev    *dev;
 	int                     vport;
@@ -121,10 +131,8 @@ struct mlx5_vport {
 	struct vport_ingress    ingress;
 	struct vport_egress     egress;
 
-	u16                     vlan;
-	u8                      qos;
-	bool                    spoofchk;
-	bool                    trusted;
+	struct mlx5_vport_info  info;
+
 	bool                    enabled;
 	u16                     enabled_events;
 };
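
For reference, node_guid_gen_from_mac() in the diff above builds an EUI-64-style node GUID from the MAC address: the three OUI bytes, then 0xff and 0xfe, then the three device-specific bytes, written from the most significant byte of the u64 downward. A standalone user-space rendering of the same byte shuffle, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Same byte layout as the kernel helper in the diff above. */
static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[6])
{
	((uint8_t *)node_guid)[7] = mac[0];
	((uint8_t *)node_guid)[6] = mac[1];
	((uint8_t *)node_guid)[5] = mac[2];
	((uint8_t *)node_guid)[4] = 0xff;
	((uint8_t *)node_guid)[3] = 0xfe;
	((uint8_t *)node_guid)[2] = mac[3];
	((uint8_t *)node_guid)[1] = mac[4];
	((uint8_t *)node_guid)[0] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t guid = 0;

	node_guid_gen_from_mac(&guid, mac);
	/* Prints 001122fffe334455 on a little-endian host. */
	printf("%016llx\n", (unsigned long long)guid);
	return 0;
}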