Mirror of https://github.com/torvalds/linux.git
mlx5-fixes-2018-05-10

-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJa9NFvAAoJEEg/ir3gV/o+vNoIAM/5zfT9f2iu6uNADcrFFfYY
bdHY/psg9qDAjqZxmFYezcwdOrPY5GxLn+1VVZPLfwCir/qupTOO2skLRyAKEau0
uKSP45LD6E+M0Sew+15//sEB3J2JzcjJsNd61lzdl+3GKT/Nr/ZGY0K8iFXItdc3
Ye/vsL1IRNaosl4dnAGzOylGeit2VeUkmS/JrFRVqFjVLu78zxEuLHdnIZApt+4W
lwpLnsplhUbPk6lwHNNureSuzQq4SXMLWIB+v1uxzHOSSZT8nkrr4/ew/BYHp1oo
EteRykl4x3SkDbPcTBeElvpb52nduC6jgn8auVXOY9XZDmqX+rxhfBh3fBVFYB8=
=FwjF
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2018-05-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-05-10

The following series includes some fixes for the mlx5 core driver.

Please pull and let me know if there's any problem.

For -stable v4.5:
  ("net/mlx5: E-Switch, Include VF RDMA stats in vport statistics")
For -stable v4.10:
  ("net/mlx5e: Err if asked to offload TC match on frag being first")
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5ae4bbf769
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1261,6 +1261,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 						  f->mask);
 		addr_type = key->addr_type;
 
+		/* the HW doesn't support frag first/later */
+		if (mask->flags & FLOW_DIS_FIRST_FRAG)
+			return -EOPNOTSUPP;
+
 		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
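The guard added above follows the standard rule for TC offload parsers: if userspace sets a mask bit the hardware cannot match (here, "first fragment" vs. "later fragment"), the driver must fail the offload with -EOPNOTSUPP rather than silently install a looser rule. A minimal, self-contained sketch of that pattern, assuming nothing about mlx5 internals (the FLOW_DIS_* values match the kernel's flow-dissector flags; struct frag_match and can_offload_frag_match() are hypothetical stand-ins):

/* Sketch only: struct frag_match and can_offload_frag_match() are
 * illustrative, not driver code. */
#include <errno.h>

#define FLOW_DIS_IS_FRAGMENT	(1 << 0)	/* as in flow_dissector.h */
#define FLOW_DIS_FIRST_FRAG	(1 << 1)

struct frag_match {
	unsigned int key_flags;		/* values to match */
	unsigned int mask_flags;	/* which bits are significant */
};

static int can_offload_frag_match(const struct frag_match *m)
{
	/* reject, don't ignore: HW can't tell first from later frags */
	if (m->mask_flags & FLOW_DIS_FIRST_FRAG)
		return -EOPNOTSUPP;
	return 0;	/* a plain "is fragment" match is offloadable */
}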
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -34,6 +34,9 @@
 #include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include "mlx5_core.h"
 #include "fpga/core.h"
 #include "eswitch.h"
@@ -923,3 +926,28 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
+
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (dev->rmap) {
+		free_irq_cpu_rmap(dev->rmap);
+		dev->rmap = NULL;
+	}
+#endif
+	list_for_each_entry(eq, &table->comp_eqs_list, list)
+		free_irq(eq->irqn, eq);
+
+	free_irq(table->pages_eq.irqn, &table->pages_eq);
+	free_irq(table->async_eq.irqn, &table->async_eq);
+	free_irq(table->cmd_eq.irqn, &table->cmd_eq);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (MLX5_CAP_GEN(dev, pg))
+		free_irq(table->pfault_eq.irqn, &table->pfault_eq);
+#endif
+	pci_free_irq_vectors(dev->pdev);
+}
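The new function walks the completion-EQ list with list_for_each_entry(), which recovers each containing mlx5_eq from its embedded list head via container_of(). A self-contained user-space sketch of that idiom; the types below are local stand-ins, not the kernel's <linux/list.h>:

/* Illustrative only: shows how an embedded list head maps back to its
 * containing struct, as in free_irq(eq->irqn, eq) above. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct eq { int irqn; struct list_head list; };

int main(void)
{
	struct eq a = { .irqn = 32 }, b = { .irqn = 33 };
	struct list_head head = { &a.list, &b.list };

	/* hand-built circular list: head -> a -> b -> head */
	a.list.next = &b.list; a.list.prev = &head;
	b.list.next = &head;   b.list.prev = &a.list;

	for (struct list_head *p = head.next; p != &head; p = p->next) {
		struct eq *e = container_of(p, struct eq, list);
		printf("would free_irq(%d)\n", e->irqn);
	}
	return 0;
}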
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2175,26 +2175,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 	memset(vf_stats, 0, sizeof(*vf_stats));
 	vf_stats->rx_packets =
 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_ib_unicast.packets) +
 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
 	vf_stats->rx_bytes =
 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_ib_unicast.octets) +
 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_ib_multicast.octets) +
 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
 
 	vf_stats->tx_packets =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
 
 	vf_stats->tx_bytes =
 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
 
 	vf_stats->multicast =
-		MLX5_GET_CTR(out, received_eth_multicast.packets);
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets);
 
 	vf_stats->broadcast =
 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
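The change folds the RDMA (IB) unicast and multicast counters into each aggregate, so VF traffic carried over RDMA is no longer invisible in the vport statistics. A toy, self-contained illustration of the before/after sums; the struct and values are made up and only loosely mirror the query_vport_counter layout:

/* Not mlx5 code: demonstrates what including the IB counters changes. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct vport_ctrs {	/* hypothetical counter snapshot for one vport */
	uint64_t eth_ucast, ib_ucast, eth_mcast, ib_mcast, eth_bcast;
};

int main(void)
{
	struct vport_ctrs rx = { 100, 40, 10, 5, 2 };

	/* before the fix: IB packets never showed up in the VF's stats */
	uint64_t rx_old = rx.eth_ucast + rx.eth_mcast + rx.eth_bcast;
	/* after the fix: IB unicast and multicast are folded in */
	uint64_t rx_new = rx_old + rx.ib_ucast + rx.ib_mcast;

	printf("rx_packets: old=%" PRIu64 " new=%" PRIu64 "\n",
	       rx_old, rx_new);
	return 0;
}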
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1587,6 +1587,14 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 
 	mlx5_enter_error_state(dev, true);
 
+	/* Some platforms require freeing the IRQs in the shutdown
+	 * flow. If they aren't freed they can't be allocated after
+	 * kexec. There is no need to clean up the mlx5_core software
+	 * contexts.
+	 */
+	mlx5_irq_clear_affinity_hints(dev);
+	mlx5_core_eq_free_irqs(dev);
+
 	return 0;
 }
 
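The comment records the operational constraint: IRQ vectors left allocated across kexec cannot be re-requested by the new kernel, so the fast-unload path must release them even though it skips the rest of the software teardown. A condensed, illustrative sketch of the ordering this hunk establishes; the mlx5_* callees are the real ones from this series, while the wrapper and its position relative to mlx5_cmd_force_teardown_hca are inferred from the comment added in mlx5_core.h:

/* Illustrative only: condenses the ordering implied by this series. */
static int fast_unload_order_sketch(struct mlx5_core_dev *dev)
{
	/* ... mlx5_cmd_force_teardown_hca(dev) succeeded earlier ... */
	mlx5_enter_error_state(dev, true);	/* stop the device */
	mlx5_irq_clear_affinity_hints(dev);	/* drop IRQ affinity hints */
	mlx5_core_eq_free_irqs(dev);		/* release vectors for kexec */
	return 0;
}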
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -128,6 +128,8 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 		       u32 *out, int outlen);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 void mlx5_stop_eqs(struct mlx5_core_dev *dev);
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
 void mlx5_cq_tasklet_cb(unsigned long data);