Merge branch 'mlx5-misc-fixes-2024-07-30'

Tariq Toukan says:

====================
mlx5 misc fixes 2024-07-30

This patchset provides misc bug fixes from the team to the mlx5 core and
Eth drivers.
====================

Link: https://patch.msgid.link/20240730061638.1831002-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit fe16667f29 by Jakub Kicinski, 2024-07-31 18:04:53 -07:00
9 changed files with 26 additions and 11 deletions


@@ -932,6 +932,7 @@ err_rule:
         mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
         mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
+        *attr = *old_attr;
         kfree(old_attr);
 err_attr:
         kvfree(spec);
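
Note on the hunk above: before the saved copy is freed on the error path, the caller-visible attr is restored from it, so a failed replace does not leave half-updated attributes behind. A minimal sketch of this save/restore-on-error pattern, with hypothetical names (struct my_attr, do_replace) rather than the actual mlx5 CT code; it assumes <linux/slab.h>:

        struct my_attr *old_attr;
        int err;

        old_attr = kmemdup(attr, sizeof(*attr), GFP_KERNEL); /* snapshot */
        if (!old_attr)
                return -ENOMEM;

        err = do_replace(attr);         /* may modify *attr before failing */
        if (err) {
                *attr = *old_attr;      /* undo caller-visible changes */
                kfree(old_attr);
                return err;
        }
        kfree(old_attr);
        return 0;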


@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
             MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
                 caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
 
-        if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
-             MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
-            MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+        if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
+            ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+              MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+             MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
                 caps |= MLX5_IPSEC_CAP_PRIO;
 
         if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,


@@ -1409,7 +1409,12 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
         if (!an_changes && link_modes == eproto.admin)
                 goto out;
 
-        mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+        err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+        if (err) {
+                netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
+                goto out;
+        }
+
         mlx5_toggle_port_link(mdev);
 
 out:


@@ -207,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
 static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
 {
         struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+        struct devlink *devlink = priv_to_devlink(dev);
 
         /* if this is the driver that initiated the fw reset, devlink completed the reload */
         if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
@@ -218,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
                         mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
                 else
                         mlx5_load_one(dev, true);
-                devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+                devl_lock(devlink);
+                devlink_remote_reload_actions_performed(devlink, 0,
                                                         BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
                                                         BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+                devl_unlock(devlink);
         }
 }
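
Note on the two hunks above: devlink_remote_reload_actions_performed() is called here from the driver's own fw-reset completion flow, not from a devlink reload op, so the devlink instance lock is not already held and the driver has to take it with devl_lock()/devl_unlock(). A minimal sketch of the distinction, with hypothetical my_* driver names standing in for the mlx5 code (only the devlink calls are real APIs; assumes <net/devlink.h> and that my_dev is the devlink priv):

        /* Inside a devlink reload op the core already holds the instance lock. */
        static int my_reload_up(struct devlink *devlink, enum devlink_reload_action action,
                                enum devlink_reload_limit limit, u32 *actions_performed,
                                struct netlink_ext_ack *extack)
        {
                *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
                return 0;               /* no devl_lock() needed here */
        }

        /* From driver-internal context (e.g. a workqueue), take the lock explicitly. */
        static void my_fw_reset_done(struct my_dev *mydev)
        {
                struct devlink *devlink = priv_to_devlink(mydev);

                devl_lock(devlink);
                devlink_remote_reload_actions_performed(devlink, 0,
                                                        BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
                devl_unlock(devlink);
        }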


@@ -48,6 +48,7 @@ static struct mlx5_irq *
 irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
 {
         struct irq_affinity_desc auto_desc = {};
+        struct mlx5_irq *irq;
         u32 irq_index;
         int err;
 
@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
                 else
                         cpu_get(pool, cpumask_first(&af_desc->mask));
         }
-        return mlx5_irq_alloc(pool, irq_index,
-                              cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
-                              NULL);
+        irq = mlx5_irq_alloc(pool, irq_index,
+                             cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+                             NULL);
+        if (IS_ERR(irq))
+                xa_erase(&pool->irqs, irq_index);
+        return irq;
 }
 
 /* Looking for the IRQ with the smallest refcount that fits req_mask.
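
Note on the irq hunk above: irq_index is reserved in the pool's xarray earlier in this function (not shown in the hunk), so when mlx5_irq_alloc() fails that reservation has to be erased or the slot is leaked. A generic sketch of the reserve-then-undo pattern; my_pool, my_obj and my_obj_create() are hypothetical, not mlx5 code, and the snippet assumes <linux/xarray.h> and <linux/err.h>:

        static struct my_obj *my_pool_request(struct my_pool *pool)
        {
                struct my_obj *obj;
                u32 index;
                int err;

                /* reserve an index in the pool's xarray up front */
                err = xa_alloc(&pool->objs, &index, NULL,
                               XA_LIMIT(0, pool->size - 1), GFP_KERNEL);
                if (err)
                        return ERR_PTR(err);

                obj = my_obj_create(pool, index);
                if (IS_ERR(obj))
                        xa_erase(&pool->objs, index); /* undo the reservation on failure */
                return obj;
        }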


@@ -1538,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
                 goto unlock;
 
         for (i = 0; i < ldev->ports; i++) {
-                if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
+                if (ldev->pf[i].netdev == slave) {
                         port = i;
                         break;
                 }


@@ -2142,7 +2142,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
         /* Panic tear down fw command will stop the PCI bus communication
          * with the HCA, so the health poll is no longer needed.
          */
-        mlx5_drain_health_wq(dev);
         mlx5_stop_health_poll(dev, false);
 
         ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2177,6 +2176,7 @@ static void shutdown(struct pci_dev *pdev)
         mlx5_core_info(dev, "Shutdown was called\n");
         set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+        mlx5_drain_health_wq(dev);
         err = mlx5_try_fast_unload(dev);
         if (err)
                 mlx5_unload_one(dev, false);


@@ -112,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
         struct mlx5_core_dev *mdev = sf_dev->mdev;
 
         set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+        mlx5_drain_health_wq(mdev);
         mlx5_unload_one(mdev, false);
 }


@@ -7,7 +7,7 @@
 /* don't try to optimize STE allocation if the stack is too constaraining */
 #define DR_RULE_MAX_STES_OPTIMIZED 0
 #else
-#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STES_OPTIMIZED 2
 #endif
 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
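
For context on the last hunk: DR_RULE_MAX_STE_CHAIN_OPTIMIZED is derived from DR_RULE_MAX_STES_OPTIMIZED and, per the stack-related comment in this header, is used to size STE scratch space on the stack in dr_rule, so lowering the macro from 5 to 2 shrinks that stack footprint. A rough worked example with assumed values; DR_ACTION_MAX_STES = 7 and a 64-byte STE are illustrative guesses, not taken from this diff:

        before: (5 + 7) * 64 = 768 bytes of on-stack STE space
        after:  (2 + 7) * 64 = 576 bytes of on-stack STE space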