Merge branch 'mlx5-updates-2024-09-11'

Saeed Mahameed says:

====================
Misc updates to mlx5 driver:

1) Fix HW steering return values and align them with kdoc
2) Flow steering cleanups and add support for no append at software level
   (a usage sketch follows below)
3) Support for sync reset using hot reset
4) RX SW counters to cover no-split events in header/data split mode
5) Make affinity of SFs configurable
====================

Link: https://patch.msgid.link/20240911201757.1505453-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2024-09-12 20:48:33 -07:00
commit b523f23f5c
22 changed files with 873 additions and 568 deletions
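
Item 2 above covers software-level handling of the FLOW_ACT_NO_APPEND flag when two rules carry an identical match value: the second rule is kept as a pending duplicate on the same FTE and is committed to hardware only when the first rule is deleted. The following is a minimal, hypothetical consumer-side sketch; mlx5_add_flow_rules(), mlx5_del_flow_rules() and FLOW_ACT_NO_APPEND are the real kernel API, while the helper name and the TIR destination are illustrative assumptions.

#include <linux/mlx5/fs.h>

/* Forward an exact-match flow to a TIR without appending to an existing FTE.
 * With the new duplicate-match support, a second call with the same 'spec'
 * succeeds and stays pending until the first handle is removed.
 */
static struct mlx5_flow_handle *
add_tir_rule_no_append(struct mlx5_flow_table *ft,
		       const struct mlx5_flow_spec *spec, u32 tirn)
{
	struct mlx5_flow_destination dest = {
		.type	 = MLX5_FLOW_DESTINATION_TYPE_TIR,
		.tir_num = tirn,
	};
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flags	= FLOW_ACT_NO_APPEND,	/* do not merge into an existing FTE */
	};

	return mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
}

Deleting the first handle with mlx5_del_flow_rules() then promotes the pending duplicate rule to hardware, as implemented in the del_hw_fte()/switch_to_pending_act_dests() hunks of fs_core.c below.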


@ -218,6 +218,22 @@ the software port.
[#accel]_.
- Informative
* - `rx[i]_hds_nosplit_packets`
- Number of packets that were not split in header/data split mode. A
packet will not get split when the hardware does not support splitting
its protocol. An example of such a protocol is ICMPv4/v6. Currently
TCP and UDP with IPv4/IPv6 are supported for header/data split
[#accel]_.
- Informative
* - `rx[i]_hds_nosplit_bytes`
- Number of bytes for packets that were not split in header/data split
mode. A packet will not get split when the hardware does not support splitting
its protocol. An example of such a protocol is ICMPv4/v6. Currently
TCP and UDP with IPv4/IPv6 are supported for header/data split
[#accel]_.
- Informative
* - `rx[i]_lro_packets`
- The number of LRO packets received on ring i [#accel]_.
- Acceleration
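
The rx[i]_hds_nosplit_* counters above are exposed per RX ring through the standard ethtool stats interface, together with the aggregated rx_hds_nosplit_packets/bytes software counters added in the stats hunks below. As a usage illustration only (not part of this patchset), the sketch below dumps them with the generic SIOCETHTOOL ioctls; it is roughly equivalent to ethtool -S <ifname> | grep hds_nosplit, and the default interface name and minimal error handling are simplifying assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_sset_info *sset;
	struct ethtool_gstrings *names;
	struct ethtool_stats *stats;
	struct ifreq ifr = {0};
	__u32 i, n_stats;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* Ask how many stat strings the driver exposes. */
	sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	ifr.ifr_data = (void *)sset;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	n_stats = sset->data[0];

	/* Fetch the stat names and the stat values. */
	names = calloc(1, sizeof(*names) + n_stats * ETH_GSTRING_LEN);
	names->cmd = ETHTOOL_GSTRINGS;
	names->string_set = ETH_SS_STATS;
	names->len = n_stats;
	ifr.ifr_data = (void *)names;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	stats = calloc(1, sizeof(*stats) + n_stats * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n_stats;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Print only the header/data split no-split counters. */
	for (i = 0; i < n_stats; i++) {
		const char *name = (const char *)&names->data[i * ETH_GSTRING_LEN];

		if (strstr(name, "hds_nosplit"))
			printf("%-40s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}
	close(fd);
	return 0;
}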


@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
case MLX5_CMD_STAT_NOT_READY:
return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
opcode != MLX5_CMD_OP_CREATE_UCTX)
opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}


@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
__entry->action = fte->action.action;
__entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
__entry->flow_tag = fte->flow_context.flow_tag;
__entry->flow_source = fte->flow_context.flow_source;
__entry->flow_tag = fte->act_dests.flow_context.flow_tag;
__entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
__entry->index = __entry->fte->dests_size - 1;
__entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,


@ -1016,30 +1016,31 @@ err_rq_xdp_prog:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
struct bpf_prog *old_prog;
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
kvfree(rq->dim);
page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
struct bpf_prog *old_prog;
old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)


@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nodata_packets++;
stats->hds_nodata_bytes += head_size;
}
} else {
stats->hds_nosplit_packets++;
stats->hds_nosplit_bytes += data_bcnt;
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);


@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },


@ -156,6 +156,8 @@ struct mlx5e_sw_stats {
u64 rx_gro_large_hds;
u64 rx_hds_nodata_packets;
u64 rx_hds_nodata_bytes;
u64 rx_hds_nosplit_packets;
u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@ -356,6 +358,8 @@ struct mlx5e_rq_stats {
u64 gro_large_hds;
u64 hds_nodata_packets;
u64 hds_nodata_bytes;
u64 hds_nosplit_packets;
u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;


@ -896,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
af_desc.is_managed = 1;
af_desc.is_managed = false;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(dev, pool, &af_desc);


@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
fte->action.exe_aso.object_id);
fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
fte->action.exe_aso.return_reg_id);
fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
fte->action.exe_aso.type);
fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
fte->action.exe_aso.flow_meter.init_color);
fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
fte->action.exe_aso.flow_meter.meter_idx);
fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
!!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
fte->flow_context.flow_tag);
fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
!!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
!!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
action = fte->action.action;
action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
goto err_out;
}
} else {
reformat_id = fte->action.pkt_reformat->id;
reformat_id = fte->act_dests.action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
if (fte->action.modify_hdr) {
if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
fte->action.crypto.type);
fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
fte->action.crypto.obj_id);
fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
return 0;
return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {


@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
{
if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
}
#endif


@ -605,12 +605,37 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
err = root->cmds->update_fte(root, ft, fg, fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
fte->modify_mask = 0;
fte->act_dests.modify_mask = 0;
}
static void del_sw_hw_dup_rule(struct fs_node *node)
{
struct mlx5_flow_rule *rule;
struct fs_fte *fte;
fs_get_obj(rule, node);
fs_get_obj(fte, rule->node.parent);
trace_mlx5_fs_del_rule(rule);
if (is_fwd_next_action(rule->sw_action)) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
mutex_unlock(&rule->dest_attr.ft->lock);
}
/* If a pending rule is being deleted it means
* this is a NO APPEND rule, so there are no partial deletions,
* all the rules of the mlx5_flow_handle are going to be deleted
* and the rules aren't shared with any other mlx5_flow_handle instance
* so no need to do any bookkeeping like in del_sw_hw_rule().
*/
kfree(rule);
}
static void del_sw_hw_rule(struct fs_node *node)
@ -628,29 +653,29 @@ static void del_sw_hw_rule(struct fs_node *node)
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
--fte->dests_size;
fte->modify_mask |=
--fte->act_dests.dests_size;
fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
goto out;
}
if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
--fte->dests_size;
fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
--fte->act_dests.dests_size;
fte->act_dests.modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
goto out;
}
if (is_fwd_dest_type(rule->dest_attr.type)) {
--fte->dests_size;
--fte->fwd_dests;
--fte->act_dests.dests_size;
--fte->act_dests.fwd_dests;
if (!fte->fwd_dests)
fte->action.action &=
if (!fte->act_dests.fwd_dests)
fte->act_dests.action.action &=
~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte->modify_mask |=
fte->act_dests.modify_mask |=
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
goto out;
}
@ -658,12 +683,33 @@ out:
kfree(rule);
}
static void switch_to_pending_act_dests(struct fs_fte *fte)
{
struct fs_node *iter;
memcpy(&fte->act_dests, &fte->dup->act_dests, sizeof(fte->act_dests));
list_bulk_move_tail(&fte->node.children,
fte->dup->children.next,
fte->dup->children.prev);
list_for_each_entry(iter, &fte->node.children, list)
iter->del_sw_func = del_sw_hw_rule;
/* Make sure the fte isn't deleted
* as mlx5_del_flow_rules() decreases the refcount
* of the fte to trigger deletion.
*/
tree_get_node(&fte->node);
}
static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_core_dev *dev;
bool pending_used = false;
struct fs_fte *fte;
int err;
@ -672,16 +718,35 @@ static void del_hw_fte(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_fte(fte);
WARN_ON(fte->dests_size);
WARN_ON(fte->act_dests.dests_size);
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (fte->dup && !list_empty(&fte->dup->children)) {
switch_to_pending_act_dests(fte);
pending_used = true;
} else {
/* Avoid double call to del_hw_fte */
node->del_hw_func = NULL;
}
if (node->active) {
err = root->cmds->delete_fte(root, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
node->active = false;
if (pending_used) {
err = root->cmds->update_fte(root, ft, fg,
fte->act_dests.modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't update to pending rule in index %d of flow group id %d\n",
fte->index, fg->id);
fte->act_dests.modify_mask = 0;
} else {
err = root->cmds->delete_fte(root, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
node->active = false;
}
}
}
@ -700,6 +765,7 @@ static void del_sw_fte(struct fs_node *node)
rhash_fte);
WARN_ON(err);
ida_free(&fg->fte_allocator, fte->index - fg->start_index);
kvfree(fte->dup);
kmem_cache_free(steering->ftes_cache, fte);
}
@ -782,8 +848,8 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
memcpy(fte->val, &spec->match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->action = *flow_act;
fte->flow_context = spec->flow_context;
fte->act_dests.action = *flow_act;
fte->act_dests.flow_context = spec->flow_context;
tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
@ -1103,18 +1169,45 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
static bool rule_is_pending(struct fs_fte *fte, struct mlx5_flow_rule *rule)
{
struct mlx5_flow_rule *tmp_rule;
struct fs_node *iter;
if (!fte->dup || list_empty(&fte->dup->children))
return false;
list_for_each_entry(iter, &fte->dup->children, list) {
tmp_rule = container_of(iter, struct mlx5_flow_rule, node);
if (tmp_rule == rule)
return true;
}
return false;
}
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_root_namespace *root;
struct fs_fte_action *act_dests;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
bool pending = false;
struct fs_fte *fte;
int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
pending = rule_is_pending(fte, rule);
if (pending)
act_dests = &fte->dup->act_dests;
else
act_dests = &fte->act_dests;
if (!(act_dests->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
down_write_ref_node(&fte->node, false);
fs_get_obj(fg, fte->node.parent);
@ -1122,8 +1215,9 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte);
if (!pending)
err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte);
up_write_ref_node(&fte->node, false);
return err;
@ -1453,6 +1547,16 @@ static struct mlx5_flow_handle *alloc_handle(int num_rules)
return handle;
}
static void destroy_flow_handle_dup(struct mlx5_flow_handle *handle,
int i)
{
for (; --i >= 0;) {
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
kfree(handle);
}
static void destroy_flow_handle(struct fs_fte *fte,
struct mlx5_flow_handle *handle,
struct mlx5_flow_destination *dest,
@ -1460,7 +1564,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
{
for (; --i >= 0;) {
if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--;
fte->act_dests.dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
}
@ -1468,6 +1572,61 @@ static void destroy_flow_handle(struct fs_fte *fte,
kfree(handle);
}
static struct mlx5_flow_handle *
create_flow_handle_dup(struct list_head *children,
struct mlx5_flow_destination *dest,
int dest_num,
struct fs_fte_action *act_dests)
{
static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
struct mlx5_flow_rule *rule = NULL;
struct mlx5_flow_handle *handle;
int i = 0;
int type;
handle = alloc_handle((dest_num) ? dest_num : 1);
if (!handle)
return NULL;
do {
rule = alloc_rule(dest + i);
if (!rule)
goto free_rules;
/* Add dest to dests list- we need flow tables to be in the
* end of the list for forward to next prio rules.
*/
tree_init_node(&rule->node, NULL, del_sw_hw_dup_rule);
if (dest &&
dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
list_add(&rule->node.list, children);
else
list_add_tail(&rule->node.list, children);
if (dest) {
act_dests->dests_size++;
if (is_fwd_dest_type(dest[i].type))
act_dests->fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
act_dests->modify_mask |= type ? count : dst;
}
handle->rule[i] = rule;
} while (++i < dest_num);
return handle;
free_rules:
destroy_flow_handle_dup(handle, i);
act_dests->dests_size = 0;
act_dests->fwd_dests = 0;
return NULL;
}
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
struct mlx5_flow_destination *dest,
@ -1510,10 +1669,10 @@ create_flow_handle(struct fs_fte *fte,
else
list_add_tail(&rule->node.list, &fte->node.children);
if (dest) {
fte->dests_size++;
fte->act_dests.dests_size++;
if (is_fwd_dest_type(dest[i].type))
fte->fwd_dests++;
fte->act_dests.fwd_dests++;
type = dest[i].type ==
MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@ -1774,17 +1933,17 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
if (check_conflicting_actions(flow_act, &fte->action)) {
if (check_conflicting_actions(flow_act, &fte->act_dests.action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
}
if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
fte->flow_context.flow_tag != flow_context->flow_tag) {
fte->act_dests.flow_context.flow_tag != flow_context->flow_tag) {
mlx5_core_warn(get_dev(&fte->node),
"FTE flow tag %u already exists with different flow tag %u\n",
fte->flow_context.flow_tag,
fte->act_dests.flow_context.flow_tag,
flow_context->flow_tag);
return -EEXIST;
}
@ -1808,12 +1967,12 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
if (ret)
return ERR_PTR(ret);
old_action = fte->action.action;
fte->action.action |= flow_act->action;
old_action = fte->act_dests.action.action;
fte->act_dests.action.action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
old_action != flow_act->action);
if (IS_ERR(handle)) {
fte->action.action = old_action;
fte->act_dests.action.action = old_action;
return handle;
}
trace_mlx5_fs_set_fte(fte, false);
@ -1961,6 +2120,62 @@ out:
return fte_tmp;
}
/* Native capability lacks support for adding an additional match with the same value
* to the same flow group. To accommodate the NO APPEND flag in these scenarios,
* we include the new rule in the existing flow table entry (fte) without immediate
* hardware commitment. When a request is made to delete the corresponding hardware rule,
* we then commit the pending rule to hardware.
*/
static struct mlx5_flow_handle *
add_rule_dup_match_fte(struct fs_fte *fte,
const struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
struct mlx5_flow_handle *handle;
struct fs_fte_dup *dup;
int i = 0;
if (!fte->dup) {
dup = kvzalloc(sizeof(*dup), GFP_KERNEL);
if (!dup)
return ERR_PTR(-ENOMEM);
/* dup will be freed when the fte is freed
* this way we don't allocate / free dup on every rule deletion
* or creation
*/
INIT_LIST_HEAD(&dup->children);
fte->dup = dup;
}
if (!list_empty(&fte->dup->children)) {
mlx5_core_warn(get_dev(&fte->node),
"Can have only a single duplicate rule\n");
return ERR_PTR(-EEXIST);
}
fte->dup->act_dests.action = *flow_act;
fte->dup->act_dests.flow_context = spec->flow_context;
fte->dup->act_dests.dests_size = 0;
fte->dup->act_dests.fwd_dests = 0;
fte->dup->act_dests.modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
handle = create_flow_handle_dup(&fte->dup->children,
dest, dest_num,
&fte->dup->act_dests);
if (!handle)
return ERR_PTR(-ENOMEM);
for (i = 0; i < handle->num_rules; i++) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
return handle;
}
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@ -1971,6 +2186,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
int ft_version)
{
struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
struct match_list *iter;
@ -1984,7 +2200,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
return ERR_PTR(-ENOMEM);
search_again_locked:
if (flow_act->flags & FLOW_ACT_NO_APPEND)
if (flow_act->flags & FLOW_ACT_NO_APPEND &&
(root->cmds->get_capabilities(root, root->table_type) &
MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH))
goto skip_search;
version = matched_fgs_get_version(match_head);
/* Try to find an fte with identical match value and attempt update its
@ -1997,7 +2215,10 @@ search_again_locked:
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
if (flow_act->flags & FLOW_ACT_NO_APPEND)
rule = add_rule_dup_match_fte(fte_tmp, spec, flow_act, dest, dest_num);
else
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
@ -2265,12 +2486,10 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
tree_remove_node(&handle->rule[i]->node, true);
if (list_empty(&fte->node.children)) {
fte->node.del_hw_func(&fte->node);
/* Avoid double call to del_hw_fte */
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
} else if (fte->dests_size) {
if (fte->modify_mask)
} else if (fte->act_dests.dests_size) {
if (fte->act_dests.modify_mask)
modify_fte(fte);
up_write_ref_node(&fte->node, false);
} else {
@ -3590,8 +3809,8 @@ out:
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
struct mlx5_flow_root_namespace *
mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_flow_namespace *ns;
@ -3614,7 +3833,7 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr;
int err;
root = get_root_namespace(dev, ns_type);
root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@ -3639,7 +3858,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
root = get_root_namespace(dev, modify_hdr->ns_type);
root = mlx5_get_root_namespace(dev, modify_hdr->ns_type);
if (WARN_ON(!root))
return;
root->cmds->modify_header_dealloc(root, modify_hdr);
@ -3655,7 +3874,7 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
struct mlx5_flow_root_namespace *root;
int err;
root = get_root_namespace(dev, ns_type);
root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@ -3681,7 +3900,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
root = get_root_namespace(dev, pkt_reformat->ns_type);
root = mlx5_get_root_namespace(dev, pkt_reformat->ns_type);
if (WARN_ON(!root))
return;
root->cmds->packet_reformat_dealloc(root, pkt_reformat);
@ -3703,7 +3922,7 @@ mlx5_create_match_definer(struct mlx5_core_dev *dev,
struct mlx5_flow_definer *definer;
int id;
root = get_root_namespace(dev, ns_type);
root = mlx5_get_root_namespace(dev, ns_type);
if (!root)
return ERR_PTR(-EOPNOTSUPP);
@ -3727,7 +3946,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root;
root = get_root_namespace(dev, definer->ns_type);
root = mlx5_get_root_namespace(dev, definer->ns_type);
if (WARN_ON(!root))
return;


@ -133,6 +133,7 @@ enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
@ -230,20 +231,29 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
struct fs_fte_action {
int modify_mask;
u32 dests_size;
u32 fwd_dests;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
};
struct fs_fte_dup {
struct list_head children;
struct fs_fte_action act_dests;
};
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 fwd_dests;
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
u32 index;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */


@ -26,6 +26,7 @@ struct mlx5_fw_reset {
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
u8 *reset_type, u8 *reset_state)
u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
if (reset_method)
*reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
}
static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
u8 *reset_method)
{
if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
*reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
return 0;
}
return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
u16 dev_id;
int err;
@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
}
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
!mlx5_is_reset_now_capable(dev)) {
err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
if (err)
mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
!mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
u16 reg16, dev_id;
int cap, err;
u16 reg16;
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return pcibios_err_to_errno(err);
err = mlx5_check_dev_ids(dev, dev_id);
if (err)
return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@ -528,6 +543,44 @@ restore:
return err;
}
static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
return -EOPNOTSUPP;
return pci_reset_bus(dev->pdev);
}
static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
{
u16 dev_id;
int err;
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return pcibios_err_to_errno(err);
err = mlx5_check_dev_ids(dev, dev_id);
if (err)
return err;
switch (reset_method) {
case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
err = mlx5_pci_link_toggle(dev, dev_id);
if (err)
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
break;
case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
err = mlx5_pci_reset_bus(dev);
if (err)
mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
goto done;
}
err = mlx5_pci_link_toggle(dev);
err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
err = mlx5_pci_link_toggle(dev);
err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
fw_reset->ret = err;
}
}


@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,


@ -9,14 +9,6 @@
#include "fs_dr.h"
#include "dr_types.h"
static bool dr_is_fw_term_table(struct mlx5_flow_table *ft)
{
if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
}
static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
@ -70,7 +62,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
u32 flags;
int err;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
ft_attr,
next_ft);
@ -110,7 +102,7 @@ static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
int err;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
@ -135,7 +127,7 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
return set_miss_action(ns, ft, next_ft);
@ -154,7 +146,7 @@ static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
match_criteria_enable);
struct mlx5dr_match_parameters mask;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
fg);
@ -179,7 +171,7 @@ static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
@ -279,7 +271,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
int err = 0;
int i;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
@ -306,12 +298,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
match_sz = sizeof(fte->val);
/* Drop reformat action bit if destination vport set with reformat */
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
if (!contain_vport_reformat_action(dst))
continue;
fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
break;
}
}
@ -321,7 +313,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
* TX: modify header -> push vlan -> encap
* RX: decap -> pop vlan -> modify header
*/
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
enum mlx5dr_action_reformat_type decap_type =
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@ -337,26 +329,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
bool is_decap;
if (fte->action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
if (fte->act_dests.action.pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
err = -EINVAL;
mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
goto free_actions;
}
is_decap = fte->action.pkt_reformat->reformat_type ==
is_decap = fte->act_dests.action.pkt_reformat->reformat_type ==
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
if (is_decap)
actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action;
fte->act_dests.action.pkt_reformat->action.dr_action;
else
delay_encap_set = true;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@ -367,7 +359,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
tmp_action =
mlx5dr_action_create_pop_vlan();
if (!tmp_action) {
@ -378,12 +370,12 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
actions[num_actions++] =
fte->action.modify_hdr->action.dr_action;
fte->act_dests.action.modify_hdr->action.dr_action;
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@ -392,8 +384,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@ -404,11 +396,11 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
if (delay_encap_set)
actions[num_actions++] =
fte->action.pkt_reformat->action.dr_action;
fte->act_dests.action.pkt_reformat->action.dr_action;
/* The order of the actions below is not important */
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
tmp_action = mlx5dr_action_create_drop();
if (!tmp_action) {
err = -ENOMEM;
@ -418,9 +410,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
term_actions[num_term_actions++].dest = tmp_action;
}
if (fte->flow_context.flow_tag) {
if (fte->act_dests.flow_context.flow_tag) {
tmp_action =
mlx5dr_action_create_tag(fte->flow_context.flow_tag);
mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@ -429,7 +421,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = tmp_action;
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
@ -510,7 +502,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
list_for_each_entry(dst, &fte->node.children, node.list) {
u32 id;
@ -537,19 +529,21 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
struct mlx5_flow_act *action = &fte->act_dests.action;
if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
fte->action.exe_aso.object_id,
fte->action.exe_aso.return_reg_id,
fte->action.exe_aso.type,
fte->action.exe_aso.flow_meter.init_color,
fte->action.exe_aso.flow_meter.meter_idx);
action->exe_aso.object_id,
action->exe_aso.return_reg_id,
action->exe_aso.type,
action->exe_aso.flow_meter.init_color,
action->exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
@ -576,8 +570,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
u32 flow_source = fte->flow_context.flow_source;
!!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
u32 flow_source = fte->act_dests.flow_context.flow_source;
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
@ -601,7 +595,7 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
&params,
num_actions,
actions,
fte->flow_context.flow_source);
fte->act_dests.flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
@ -740,7 +734,7 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
int err;
int i;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
err = mlx5dr_rule_destroy(rule->dr_rule);
@ -765,7 +759,7 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
struct fs_fte fte_tmp = {};
int ret;
if (dr_is_fw_term_table(ft))
if (mlx5_fs_cmd_is_fw_term_table(ft))
return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
/* Backup current dr rule details */
@ -819,11 +813,11 @@ static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
u32 steering_caps = 0;
u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
if (ft_type != FS_FT_FDB ||
MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
return 0;
return steering_caps;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;


@ -967,7 +967,7 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
ret = hws_matcher_check_and_process_at(matcher, at);
if (ret)
return -ret;
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {


@ -751,11 +751,11 @@ int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
ret = hws_rule_enqueue_precheck(rule, attr);
if (unlikely(ret))
return -ret;
return ret;
ret = hws_rule_destroy_hws(rule, attr);
return -ret;
return ret;
}
int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
@ -767,7 +767,7 @@ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
ret = hws_rule_enqueue_precheck_update(rule, attr);
if (unlikely(ret))
return -ret;
return ret;
ret = hws_rule_create_hws(rule,
attr,
@ -776,5 +776,5 @@ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
at_idx,
rule_actions);
return -ret;
return ret;
}


@ -489,5 +489,5 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return -ret;
return ret;
}


@ -1449,6 +1449,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,


@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
struct mlx5_flow_root_namespace *
mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
#endif


@ -1856,7 +1856,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
u8 reserved_at_330[0x6];
u8 reserved_at_330[0x5];
u8 pcie_reset_using_hotreset_method[0x1];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 vport_counter_local_loopback[0x1];
@ -11188,6 +11189,11 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[][0x20];
};
enum {
MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
};
enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
@ -11215,7 +11221,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
u8 reserved_at_28[0x4];
u8 pci_reset_req_method[0x3];
u8 reserved_at_2b[0x1];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];