mlx5-fixes-2023-03-15

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAmQSS6oACgkQSD+KveBX
 +j4fGAf/SYSJAhheU+YbxoRubR9vgAiiu0RLhTXhjh6C8xkSjwzm0GuFMfJTOTRD
 v3wmhE6Igyof+CVb1EdABTHwGrpBJH+HgWuD4OXlkGjnXtAUAxWetM+kb0TcctLJ
 R+tY7ju7i6MNhgjpxiFuKeuxY20wh6kYGuBykcKhcw4YgDInu3D+F91tPodCHr+B
 qAPJOusaqD4kwR4ozhxo0cs71OoA3gElLqMXUOiOcqMOB70JfJGKwrI975RDiEr0
 yv3eDioHWYNVqntPkkZOjTrEiv1SxytbIS8wntYw7bgpA2/LoY+TQR0x/YJahPhb
 7ad3EyabnnFRqiQVPzif2h1BgZVIzA==
 =K4VQ
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2023-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-03-15

This series provides bug fixes for the mlx5 driver.

* tag 'mlx5-fixes-2023-03-15' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: TC, Remove error message log print
  net/mlx5e: TC, fix cloned flow attribute
  net/mlx5e: TC, fix missing error code
  net/mlx5e: TC, fix raw counter initialization
  net/mlx5e: Lower maximum allowed MTU in XSK to match XDP prerequisites
  net/mlx5: Set BREAK_FW_WAIT flag first when removing driver
  net/mlx5e: kTLS, Fix missing error unwind on unsupported cipher type
  net/mlx5e: Fix cleanup null-ptr deref on encap lock
  net/mlx5: E-switch, Fix missing set of split_count when forward to ovs internal port
  net/mlx5: E-switch, Fix wrong usage of source port rewrite in split rules
  net/mlx5: Disable eswitch before waiting for VF pages
  net/mlx5: Fix setting ec_function bit in MANAGE_PAGES
  net/mlx5e: Don't cache tunnel offloads capability
  net/mlx5e: Fix macsec ASO context alignment
====================

Link: https://lore.kernel.org/r/20230315225847.360083-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0d2be75ce7
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2023-03-16 21:13:05 -07:00

13 changed files with 81 additions and 47 deletions

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -313,7 +313,6 @@ struct mlx5e_params {
 		} channel;
 	} mqprio;
 	bool rx_cqe_compress_def;
-	bool tunneled_offload_en;
 	struct dim_cq_moder rx_cq_moderation;
 	struct dim_cq_moder tx_cq_moderation;
 	struct mlx5e_packet_merge_param packet_merge;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c

@@ -178,7 +178,6 @@ tc_act_police_stats(struct mlx5e_priv *priv,
 	meter = mlx5e_tc_meter_get(priv->mdev, &params);
 	if (IS_ERR(meter)) {
 		NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
-		mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
 		return PTR_ERR(meter);
 	}
 

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c

@@ -64,6 +64,7 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
 {
 	struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
 	struct rhashtable *ht = &handle->ht;
+	u64 lastused;
 	int err = 0;
 
 	act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
@@ -73,6 +74,10 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
 	act_stats->tc_act_cookie = act_cookie;
 	act_stats->counter = counter;
 
+	mlx5_fc_query_cached_raw(counter,
+				 &act_stats->lastbytes,
+				 &act_stats->lastpackets, &lastused);
+
 	rcu_read_lock();
 	old_act_stats = rhashtable_lookup_get_insert_fast(ht,
 							  &act_stats->hash,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c

@@ -621,15 +621,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	if (unlikely(!priv_rx))
 		return -ENOMEM;
 
-	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
-	if (IS_ERR(dek)) {
-		err = PTR_ERR(dek);
-		goto err_create_key;
-	}
-	priv_rx->dek = dek;
-
-	INIT_LIST_HEAD(&priv_rx->list);
-	spin_lock_init(&priv_rx->lock);
 	switch (crypto_info->cipher_type) {
 	case TLS_CIPHER_AES_GCM_128:
 		priv_rx->crypto_info.crypto_info_128 =
@@ -642,9 +633,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	default:
 		WARN_ONCE(1, "Unsupported cipher type %u\n",
 			  crypto_info->cipher_type);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto err_cipher_type;
 	}
 
+	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+	if (IS_ERR(dek)) {
+		err = PTR_ERR(dek);
+		goto err_cipher_type;
+	}
+	priv_rx->dek = dek;
+
+	INIT_LIST_HEAD(&priv_rx->list);
+	spin_lock_init(&priv_rx->lock);
+
 	rxq = mlx5e_ktls_sk_get_rxq(sk);
 	priv_rx->rxq = rxq;
 	priv_rx->sk = sk;
@@ -677,7 +679,7 @@ err_post_wqes:
 	mlx5e_tir_destroy(&priv_rx->tir);
 err_create_tir:
 	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
-err_create_key:
+err_cipher_type:
 	kfree(priv_rx);
 	return err;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c

@@ -469,14 +469,6 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 	if (IS_ERR(priv_tx))
 		return PTR_ERR(priv_tx);
 
-	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
-	if (IS_ERR(dek)) {
-		err = PTR_ERR(dek);
-		goto err_create_key;
-	}
-	priv_tx->dek = dek;
-
-	priv_tx->expected_seq = start_offload_tcp_sn;
 	switch (crypto_info->cipher_type) {
 	case TLS_CIPHER_AES_GCM_128:
 		priv_tx->crypto_info.crypto_info_128 =
@@ -489,8 +481,18 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 	default:
 		WARN_ONCE(1, "Unsupported cipher type %u\n",
 			  crypto_info->cipher_type);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto err_pool_push;
 	}
 
+	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+	if (IS_ERR(dek)) {
+		err = PTR_ERR(dek);
+		goto err_pool_push;
+	}
+	priv_tx->dek = dek;
+
+	priv_tx->expected_seq = start_offload_tcp_sn;
 	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
 
 	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -500,7 +502,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 
 	return 0;
 
-err_create_key:
+err_pool_push:
 	pool_push(pool, priv_tx);
 	return err;
 }
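
[Editor's note] Both kTLS fixes apply the same classic kernel idiom: do cheap validation before creating any resource, and give each failure point a goto label that unwinds exactly what has been set up so far. The original code created the key first, then bailed out of the cipher-type switch with a bare return, leaking the key. A compilable userspace sketch of the idiom, with hypothetical names standing in for the driver's helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct priv { void *dek; };

/* cipher_ok stands in for the crypto_info->cipher_type switch;
 * malloc() stands in for mlx5_ktls_create_key(). */
static int add_ctx(int cipher_ok)
{
        struct priv *priv;
        int err;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -ENOMEM;

        /* Validate first: nothing to unwind yet besides priv itself. */
        if (!cipher_ok) {
                err = -EOPNOTSUPP;
                goto err_cipher_type;
        }

        priv->dek = malloc(16); /* key is created only after validation */
        if (!priv->dek) {
                err = -ENOMEM;
                goto err_cipher_type;
        }

        free(priv->dek); /* success-path teardown, just for the demo */
        free(priv);
        return 0;

err_cipher_type:
        free(priv); /* the key was never created on this path */
        return err;
}

int main(void)
{
        printf("add_ctx(0) = %d (expect %d)\n", add_ctx(0), -EOPNOTSUPP);
        return 0;
}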

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c

@@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc {
 };
 
 struct mlx5e_macsec_umr {
+	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
 	dma_addr_t dma_addr;
-	u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
 	u32 mkey;
 };
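
[Editor's note] The bug here is subtle: the ASO context is consumed by hardware and must sit on a 64-byte boundary, but member order alone doesn't guarantee that. Attaching the alignment attribute to the member does, because it propagates to every instance of the struct. A C11 sketch of the same guarantee; the sizes and types are placeholders, not MLX5_ST_SZ_BYTES or dma_addr_t:

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>

/* 64 stands in for MLX5_ST_SZ_BYTES(macsec_aso). */
struct macsec_umr {
        alignas(64) uint8_t ctx[64]; /* hardware-read context, 64B aligned */
        uint64_t dma_addr;
        uint32_t mkey;
};

/* The member attribute raises the whole struct's alignment, so ctx is
 * 64-byte aligned no matter where a macsec_umr instance is placed. */
_Static_assert(alignof(struct macsec_umr) == 64, "ctx alignment must be 64");
_Static_assert(offsetof(struct macsec_umr, ctx) == 0, "ctx stays first");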

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -4169,13 +4169,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
 		struct xsk_buff_pool *xsk_pool =
 			mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
 		struct mlx5e_xsk_param xsk;
+		int max_xdp_mtu;
 
 		if (!xsk_pool)
 			continue;
 
 		mlx5e_build_xsk_param(xsk_pool, &xsk);
+		max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
 
-		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
+		/* Validate XSK params and XDP MTU in advance */
+		if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
+		    new_params->sw_mtu > max_xdp_mtu) {
 			u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
 			int max_mtu_frame, max_mtu_page, max_mtu;
 
@@ -4185,9 +4189,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
 			 */
 			max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
 			max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
-			max_mtu = min(max_mtu_frame, max_mtu_page);
+			max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
 
-			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
+			netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
 				   new_params->sw_mtu, ix, max_mtu);
 			return false;
 		}
@@ -4979,8 +4983,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 	/* TX inline */
 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
-	params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
 	/* AF_XDP */
 	params->xsk = xsk;
@@ -5285,7 +5287,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	}
 
 	features = MLX5E_RX_RES_FEATURE_PTP;
-	if (priv->channels.params.tunneled_offload_en)
+	if (mlx5_tunnel_inner_ft_supported(mdev))
 		features |= MLX5E_RX_RES_FEATURE_INNER_FT;
 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
 				priv->max_nch, priv->drop_rq.rqn,
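
[Editor's note] The shape of the XSK MTU check is the reusable part: each constraint (XSK frame size, page-backed SKB head, the redirection XDP program) yields its own ceiling, and the advertised limit is the minimum of all of them, which is what min3() expresses in the hunk above. A hypothetical userspace sketch with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

static int min3(int a, int b, int c)
{
        int m = a < b ? a : b;

        return m < c ? m : c;
}

/* Illustrative values only: each source of truth contributes one ceiling,
 * and validation compares the requested MTU against all of them at once. */
static bool validate_mtu(int requested)
{
        int max_mtu_frame = 3498; /* XSK chunk size minus headroom */
        int max_mtu_page  = 3754; /* SKB_MAX_HEAD(0) equivalent */
        int max_xdp_mtu   = 3442; /* what the XDP program can handle */
        int max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);

        if (requested > max_mtu) {
                fprintf(stderr, "MTU %d too big, try <= %d\n", requested, max_mtu);
                return false;
        }
        return true;
}

int main(void)
{
        return validate_mtu(3500) ? 1 : 0; /* rejected: exceeds the XDP ceiling */
}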

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -755,7 +755,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
 	params->mqprio.num_tc = 1;
-	params->tunneled_offload_en = false;
 
 	if (rep->vport != MLX5_VPORT_UPLINK)
 		params->vlan_strip_disable = true;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -3752,7 +3752,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
 	parse_attr->filter_dev = attr->parse_attr->filter_dev;
 	attr2->action = 0;
 	attr2->counter = NULL;
-	attr->tc_act_cookies_count = 0;
+	attr2->tc_act_cookies_count = 0;
 	attr2->flags = 0;
 	attr2->parse_attr = parse_attr;
 	attr2->dest_chain = 0;
@@ -4304,6 +4304,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
 	esw_attr->dest_int_port = dest_int_port;
 	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+	esw_attr->split_count = out_index;
 
 	/* Forward to root fdb for matching against the new source vport */
 	attr->dest_chain = 0;
@@ -5304,8 +5305,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 		mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
 
 	tc->action_stats_handle = mlx5e_tc_act_stats_create();
-	if (IS_ERR(tc->action_stats_handle))
+	if (IS_ERR(tc->action_stats_handle)) {
+		err = PTR_ERR(tc->action_stats_handle);
 		goto err_act_stats;
+	}
 
 	return 0;
@@ -5440,8 +5443,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
 	}
 
 	uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
-	if (IS_ERR(uplink_priv->action_stats_handle))
+	if (IS_ERR(uplink_priv->action_stats_handle)) {
+		err = PTR_ERR(uplink_priv->action_stats_handle);
 		goto err_action_counter;
+	}
 
 	return 0;
@@ -5463,6 +5468,16 @@ err_tun_mapping:
 void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
 {
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5_eswitch *esw;
+	struct mlx5e_priv *priv;
+
+	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+	priv = netdev_priv(rpriv->netdev);
+	esw = priv->mdev->priv.eswitch;
+	mlx5e_tc_clean_fdb_peer_flows(esw);
+
 	mlx5e_tc_tun_cleanup(uplink_priv->encap);
 
 	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
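
[Editor's note] Two of the TC hunks above fix the same bug class behind "fix missing error code": the error branch jumped to the unwind label without ever assigning err, so the function could return 0 on failure. A tiny compilable sketch of the bug and its fix, using simplified userspace stand-ins for the kernel's ERR_PTR machinery:

#include <errno.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR/PTR_ERR/IS_ERR helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Always fails, just for the demo. */
static void *act_stats_create(void) { return ERR_PTR(-ENOMEM); }

static int tc_init(void)
{
        int err = 0;
        void *handle;

        handle = act_stats_create();
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle); /* the fix: capture the code before unwinding */
                goto err_act_stats;
        }
        return 0;

err_act_stats:
        /* unwind any earlier setup here */
        return err; /* without the assignment above, this would return 0 */
}

int main(void)
{
        printf("tc_init() = %d\n", tc_init()); /* -ENOMEM, not 0 */
        return 0;
}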

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -723,11 +723,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 
 	for (i = 0; i < esw_attr->split_count; i++) {
-		if (esw_is_indir_table(esw, attr))
-			err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
-		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
-			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
-							       &i);
+		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+			/* Source port rewrite (forward to ovs internal port or stacked device) isn't
+			 * supported in the rule of split action.
+			 */
+			err = -EOPNOTSUPP;
 		else
 			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
 

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 	params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
 	params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
-	params->tunneled_offload_en = false;
 
 	/* CQE compression is not supported for IPoIB */
 	params->rx_cqe_compress_def = false;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -1364,8 +1364,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
 {
 	mlx5_devlink_traps_unregister(priv_to_devlink(dev));
 	mlx5_sf_dev_table_destroy(dev);
-	mlx5_sriov_detach(dev);
 	mlx5_eswitch_disable(dev->priv.eswitch);
+	mlx5_sriov_detach(dev);
 	mlx5_lag_remove_mdev(dev);
 	mlx5_ec_cleanup(dev);
 	mlx5_sf_hw_table_destroy(dev);
@@ -1789,11 +1789,11 @@ static void remove_one(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct devlink *devlink = priv_to_devlink(dev);
 
+	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
 	/* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
 	 * fw_reset before unregistering the devlink.
 	 */
 	mlx5_drain_fw_reset(dev);
-	set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
 	devlink_unregister(devlink);
 	mlx5_sriov_disable(pdev);
 	mlx5_crdump_disable(dev);
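
[Editor's note] The remove_one() change is purely an ordering fix: the flag that tells firmware-wait loops to give up must be set before draining, otherwise the drain can block behind a waiter that has no reason to stop. A hypothetical pthread sketch of the same ordering (compile with -lpthread); the flag plays the role of MLX5_BREAK_FW_WAIT and the join stands in for the drain:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool break_fw_wait;

static void *fw_waiter(void *arg)
{
        (void)arg;
        /* A wait loop that polls the break flag, like the driver's fw wait. */
        while (!atomic_load(&break_fw_wait))
                usleep(1000);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fw_waiter, NULL);

        /* Set the break flag FIRST, then drain: with the opposite order,
         * the join below could block indefinitely on the waiting thread. */
        atomic_store(&break_fw_wait, true);
        pthread_join(t, NULL); /* stands in for mlx5_drain_fw_reset() */

        puts("teardown completed");
        return 0;
}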

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c

@@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
 	return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
 }
 
+static u32 mlx5_get_ec_function(u32 function)
+{
+	return function >> 16;
+}
+
+static u32 mlx5_get_func_id(u32 function)
+{
+	return function & 0xffff;
+}
+
 static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
 	struct rb_root *root;
@@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void)
 }
 
 static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
-				   struct rb_root *root, u16 func_id)
+				   struct rb_root *root, u32 function)
 {
 	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
 	unsigned long end = jiffies + recl_pages_to_jiffies;
 
 	while (!RB_EMPTY_ROOT(root)) {
+		u32 ec_function = mlx5_get_ec_function(function);
+		u32 function_id = mlx5_get_func_id(function);
 		int nclaimed;
 		int err;
 
-		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
-				    &nclaimed, false, mlx5_core_is_ecpf(dev));
+		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
+				    &nclaimed, false, ec_function);
 		if (err) {
-			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
-				       err, func_id);
+			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
+				       err, function_id, ec_function);
 			return err;
 		}
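
[Editor's note] The root cause of the MANAGE_PAGES fix: page roots are now keyed by a single u32 that encodes both the 16-bit function id and the ec_function flag, so reclaim must unpack both halves instead of recomputing ec_function from the device. A standalone sketch of that encoding; the unpack helpers mirror the ones added in the hunk above, while pack_function() is a hypothetical illustration of the convention:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same convention as the patch: low 16 bits carry the function id,
 * bit 16 carries the ec_function flag. */
static uint32_t get_ec_function(uint32_t function) { return function >> 16; }
static uint32_t get_func_id(uint32_t function)     { return function & 0xffff; }

static uint32_t pack_function(uint16_t func_id, int ec_function)
{
        return ((uint32_t)!!ec_function << 16) | func_id;
}

int main(void)
{
        uint32_t function = pack_function(0x0005, 1);

        assert(get_func_id(function) == 0x0005);
        assert(get_ec_function(function) == 1);
        printf("func_id=0x%x ec_func=0x%x\n", get_func_id(function),
               get_ec_function(function));
        return 0;
}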