Merge branch 'mlx5-next'

Or Gerlitz says:

====================
mlx5: Add Interface Step Sequence ID support

ISSI (Interface Step Sequence ID) defines the step sequence ID of the
interface between the driver and the firmware, and is incremented in
steps of one. ISSI is used to enable deprecating/modifying features,
command interfaces and such, while maintaining compatibility.

As the driver serves both Connect-IB (CIB) and ConnectX-4, we carefully
made sure that the IB functionality keeps running on older CIB
firmware releases that don't support ISSI.

The Ethernet functionality is available only on ConnectX-4, where all
firmware releases support the feature from the very first ISSI level,
so at this point there is no need for compatibility code there.

As was done prior to this series, when the Ethernet functionality is enabled,
the core driver queries the supported ISSIs during the initialization flow
using the QUERY_ISSI command and then, if ISSI is supported, sets the actual
ISSI value with the SET_ISSI command, informing the firmware which ISSI level
to run on (a sketch of this exchange follows the commit summary below).

Previously, the IB driver wasn't ready to work in that mode, and hence
building both the IB driver and the Ethernet functionality in the core
driver was disallowed by Kconfig. With this series, we allow users to
enable them both.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed 2015-06-04 16:41:02 -07:00
commit bb62f791f9
24 changed files with 1819 additions and 356 deletions
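
The ISSI negotiation described in the quoted message boils down to a single
QUERY_ISSI/SET_ISSI exchange during device initialization. The sketch below is
a minimal, illustrative version modeled on mlx5_core_set_issi() touched in the
diffs that follow; the opcodes and the query_issi/set_issi field names are
taken from mlx5_ifc.h and should be read as assumptions, not as a verbatim
copy of the driver code.

static int issi_negotiate_sketch(struct mlx5_core_dev *dev)
{
	/* assumed layouts: query_issi_in/out and set_issi_in/out from mlx5_ifc.h */
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
	u32 sup_issi;
	int err;

	/* Step 1: ask the firmware which ISSI levels it supports */
	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err)
		return 0;	/* sketch-level fallback: firmware predating ISSI
				 * (e.g. older Connect-IB) rejects the command,
				 * so stay on ISSI level 0 */

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	/* Step 2: if level 1 is supported, tell the firmware to run on it */
	if (sup_issi & (1 << 1)) {
		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err)
			return err;
		dev->issi = 1;
		return 0;
	}

	/* Step 3: only level 0 reported (or nothing at all): keep running on ISSI 0 */
	if ((sup_issi & (1 << 0)) || !sup_issi)
		return 0;

	return -EOPNOTSUPP;
}

The rest of the series keys off dev->issi: the IB driver keeps using the legacy
MAD path when it is 0 (see mlx5_use_mad_ifc() in the diffs below) and switches
to the new vport/HCA query commands otherwise.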


@ -137,3 +137,300 @@ out:
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad)
{
struct ib_smp *in_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
if (!in_mad)
return -ENOMEM;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
out_mad);
kfree(in_mad);
return err;
}
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
return -ENOMEM;
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
memcpy(sys_image_guid, out_mad->data + 4, 8);
out:
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
return -ENOMEM;
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
out:
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
return -ENOMEM;
err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
if (err)
goto out;
*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
out:
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(node_desc, out_mad->data, 64);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(node_guid, out_mad->data + 12, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
out_mad);
if (err)
goto out;
*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
out_mad);
if (err)
goto out;
memcpy(gid->raw, out_mad->data + 8, 8);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
out_mad);
if (err)
goto out;
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int ext_active_speed;
int err = -ENOMEM;
if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof(*props));
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
if (err) {
mlx5_ib_warn(dev, "err %d\n", err);
goto out;
}
props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
props->sm_sl = out_mad->data[36] & 0xf;
props->state = out_mad->data[32] & 0xf;
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
props->active_speed = out_mad->data[35] >> 4;
props->max_mtu = out_mad->data[41] & 0xf;
props->active_mtu = out_mad->data[36] >> 4;
props->subnet_timeout = out_mad->data[51] & 0x1f;
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
/* Check if extended speeds (EDR/FDR/...) are supported */
if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
ext_active_speed = out_mad->data[62] >> 4;
switch (ext_active_speed) {
case 1:
props->active_speed = 16; /* FDR */
break;
case 2:
props->active_speed = 32; /* EDR */
break;
}
}
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
if (mdev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx5_MAD_IFC(dev, 1, 1, port,
NULL, NULL, in_mad, out_mad);
if (err)
goto out;
/* Checking LinkSpeedActive for FDR-10 */
if (out_mad->data[15] & 0x1)
props->active_speed = 8;
}
}
out:
kfree(in_mad);
kfree(out_mad);
return err;
}


@ -40,6 +40,7 @@
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
@ -62,30 +63,168 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device)
{
struct mlx5_ib_dev *dev = to_mdev(device);
switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
case MLX5_CAP_PORT_TYPE_IB:
return IB_LINK_LAYER_INFINIBAND;
case MLX5_CAP_PORT_TYPE_ETH:
return IB_LINK_LAYER_ETHERNET;
default:
return IB_LINK_LAYER_UNSPECIFIED;
}
}
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
return !dev->mdev->issi;
}
enum {
MLX5_VPORT_ACCESS_METHOD_MAD,
MLX5_VPORT_ACCESS_METHOD_HCA,
MLX5_VPORT_ACCESS_METHOD_NIC,
};
static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
if (mlx5_use_mad_ifc(to_mdev(ibdev)))
return MLX5_VPORT_ACCESS_METHOD_MAD;
if (mlx5_ib_port_link_layer(ibdev) ==
IB_LINK_LAYER_ETHERNET)
return MLX5_VPORT_ACCESS_METHOD_NIC;
return MLX5_VPORT_ACCESS_METHOD_HCA;
}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
u64 tmp;
int err;
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_system_image_guid(ibdev,
sys_image_guid);
case MLX5_VPORT_ACCESS_METHOD_HCA:
err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
if (!err)
*sys_image_guid = cpu_to_be64(tmp);
return err;
default:
return -EINVAL;
}
}
static int mlx5_query_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
pkey_table_size));
return 0;
default:
return -EINVAL;
}
}
static int mlx5_query_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
default:
return -EINVAL;
}
}
static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
__be64 *node_guid)
{
u64 tmp;
int err;
switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_node_guid(dev, node_guid);
case MLX5_VPORT_ACCESS_METHOD_HCA:
err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
if (!err)
*node_guid = cpu_to_be64(tmp);
return err;
default:
return -EINVAL;
}
}
struct mlx5_reg_node_desc {
u8 desc[64];
};
static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
struct mlx5_reg_node_desc in;
if (mlx5_use_mad_ifc(dev))
return mlx5_query_mad_ifc_node_desc(dev, node_desc);
memset(&in, 0, sizeof(in));
return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
sizeof(struct mlx5_reg_node_desc),
MLX5_REG_NODE_DESC, 0, 0);
}
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
int max_rq_sg;
int max_sq_sg;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memset(props, 0, sizeof(*props));
err = mlx5_query_system_image_guid(ibdev,
&props->sys_image_guid);
if (err)
return err;
err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
if (err)
return err;
err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
if (err)
return err;
props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
(fw_rev_min(dev->mdev) << 16) |
@ -117,11 +256,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
0xffffff;
props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
props->max_mr_size = ~0ull;
props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
@ -147,7 +283,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_fast_reg_page_list_len = (unsigned int)-1;
props->atomic_cap = IB_ATOMIC_NONE;
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
@ -160,176 +295,233 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->odp_caps = dev->odp_caps;
#endif
out:
kfree(in_mad);
kfree(out_mad);
return 0;
}
enum mlx5_ib_width {
MLX5_IB_WIDTH_1X = 1 << 0,
MLX5_IB_WIDTH_2X = 1 << 1,
MLX5_IB_WIDTH_4X = 1 << 2,
MLX5_IB_WIDTH_8X = 1 << 3,
MLX5_IB_WIDTH_12X = 1 << 4
};
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
int err = 0;
if (active_width & MLX5_IB_WIDTH_1X) {
*ib_width = IB_WIDTH_1X;
} else if (active_width & MLX5_IB_WIDTH_2X) {
mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
(int)active_width);
err = -EINVAL;
} else if (active_width & MLX5_IB_WIDTH_4X) {
*ib_width = IB_WIDTH_4X;
} else if (active_width & MLX5_IB_WIDTH_8X) {
*ib_width = IB_WIDTH_8X;
} else if (active_width & MLX5_IB_WIDTH_12X) {
*ib_width = IB_WIDTH_12X;
} else {
mlx5_ib_dbg(dev, "Invalid active_width %d\n",
(int)active_width);
err = -EINVAL;
}
return err;
}
static int mlx5_mtu_to_ib_mtu(int mtu)
{
switch (mtu) {
case 256: return 1;
case 512: return 2;
case 1024: return 3;
case 2048: return 4;
case 4096: return 5;
default:
pr_warn("invalid mtu\n");
return -1;
}
}
enum ib_max_vl_num {
__IB_MAX_VL_0 = 1,
__IB_MAX_VL_0_1 = 2,
__IB_MAX_VL_0_3 = 3,
__IB_MAX_VL_0_7 = 4,
__IB_MAX_VL_0_14 = 5,
};
enum mlx5_vl_hw_cap {
MLX5_VL_HW_0 = 1,
MLX5_VL_HW_0_1 = 2,
MLX5_VL_HW_0_2 = 3,
MLX5_VL_HW_0_3 = 4,
MLX5_VL_HW_0_4 = 5,
MLX5_VL_HW_0_5 = 6,
MLX5_VL_HW_0_6 = 7,
MLX5_VL_HW_0_7 = 8,
MLX5_VL_HW_0_14 = 15
};
static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
u8 *max_vl_num)
{
switch (vl_hw_cap) {
case MLX5_VL_HW_0:
*max_vl_num = __IB_MAX_VL_0;
break;
case MLX5_VL_HW_0_1:
*max_vl_num = __IB_MAX_VL_0_1;
break;
case MLX5_VL_HW_0_3:
*max_vl_num = __IB_MAX_VL_0_3;
break;
case MLX5_VL_HW_0_7:
*max_vl_num = __IB_MAX_VL_0_7;
break;
case MLX5_VL_HW_0_14:
*max_vl_num = __IB_MAX_VL_0_14;
break;
default:
return -EINVAL;
}
return 0;
}
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
int max_mtu;
int oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep) {
err = -ENOMEM;
goto out;
}
memset(props, 0, sizeof(*props));
err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
if (err)
goto out;
props->lid = rep->lid;
props->lmc = rep->lmc;
props->sm_lid = rep->sm_lid;
props->sm_sl = rep->sm_sl;
props->state = rep->vport_state;
props->phys_state = rep->port_physical_state;
props->port_cap_flags = rep->cap_mask1;
props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
props->bad_pkey_cntr = rep->pkey_violation_counter;
props->qkey_viol_cntr = rep->qkey_violation_counter;
props->subnet_timeout = rep->subnet_timeout;
props->init_type_reply = rep->init_type_reply;
err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
if (err)
goto out;
err = translate_active_width(ibdev, ib_link_width_oper,
&props->active_width);
if (err)
goto out;
err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
port);
if (err)
goto out;
err = mlx5_query_port_max_mtu(mdev, &max_mtu, port);
if (err)
goto out;
props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
if (err)
goto out;
props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
if (err)
goto out;
err = translate_max_vl_num(ibdev, vl_hw_cap,
&props->max_vl_num);
out:
kfree(rep);
return err;
}
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int ext_active_speed;
int err = -ENOMEM;
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_port(ibdev, port, props);
if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
case MLX5_VPORT_ACCESS_METHOD_HCA:
return mlx5_query_hca_port(ibdev, port, props);
default:
return -EINVAL;
}
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
memset(props, 0, sizeof(*props));
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
if (err) {
mlx5_ib_warn(dev, "err %d\n", err);
goto out;
}
props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
props->sm_sl = out_mad->data[36] & 0xf;
props->state = out_mad->data[32] & 0xf;
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
props->active_speed = out_mad->data[35] >> 4;
props->max_mtu = out_mad->data[41] & 0xf;
props->active_mtu = out_mad->data[36] >> 4;
props->subnet_timeout = out_mad->data[51] & 0x1f;
props->max_vl_num = out_mad->data[37] >> 4;
props->init_type_reply = out_mad->data[41] >> 4;
/* Check if extended speeds (EDR/FDR/...) are supported */
if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
ext_active_speed = out_mad->data[62] >> 4;
switch (ext_active_speed) {
case 1:
props->active_speed = 16; /* FDR */
break;
case 2:
props->active_speed = 32; /* EDR */
break;
}
}
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
if (mdev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
err = mlx5_MAD_IFC(dev, 1, 1, port,
NULL, NULL, in_mad, out_mad);
if (err)
goto out;
/* Checking LinkSpeedActive for FDR-10 */
if (out_mad->data[15] & 0x1)
props->active_speed = 8;
}
}
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
case MLX5_VPORT_ACCESS_METHOD_HCA:
return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
default:
return -EINVAL;
}
memcpy(gid->raw, out_mad->data + 8, 8);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
}
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
out:
kfree(in_mad);
kfree(out_mad);
return err;
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
pkey);
default:
return -EINVAL;
}
}
struct mlx5_reg_node_desc {
u8 desc[64];
};
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
@ -727,37 +919,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx5_ib_dev *dev)
{
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
int err;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!in_mad || !out_mad)
goto out;
init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
if (err)
goto out;
return err;
memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
dev->mdev->rev_id = dev->mdev->pdev->revision;
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
kfree(in_mad);
kfree(out_mad);
return err;
return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
@ -1154,8 +1324,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
memset(&attr, 0, sizeof(attr));
attr.attr.max_sge = 1;
attr.attr.max_wr = 1;
attr.srq_type = IB_SRQT_BASIC;
devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
if (IS_ERR(devr->s1)) {
ret = PTR_ERR(devr->s1);
goto error5;
}
devr->s1->device = &dev->ib_dev;
devr->s1->pd = devr->p0;
devr->s1->uobject = NULL;
devr->s1->event_handler = NULL;
devr->s1->srq_context = NULL;
devr->s1->srq_type = IB_SRQT_BASIC;
devr->s1->ext.xrc.cq = devr->c0;
atomic_inc(&devr->p0->usecnt);
atomic_set(&devr->s0->usecnt, 0);
return 0;
error5:
mlx5_ib_destroy_srq(devr->s0);
error4:
mlx5_ib_dealloc_xrcd(devr->x1);
error3:
@ -1170,6 +1361,7 @@ error0:
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
mlx5_ib_destroy_srq(devr->s1);
mlx5_ib_destroy_srq(devr->s0);
mlx5_ib_dealloc_xrcd(devr->x0);
mlx5_ib_dealloc_xrcd(devr->x1);
@ -1183,6 +1375,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
int err;
int i;
/* don't create IB instance over Eth ports, no RoCE yet! */
if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
return NULL;
printk_once(KERN_INFO "%s", mlx5_version);
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@ -1195,7 +1391,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (err)
goto err_dealloc;
get_ext_port_caps(dev);
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);


@ -415,6 +415,7 @@ struct mlx5_ib_resources {
struct ib_xrcd *x1;
struct ib_pd *p0;
struct ib_srq *s0;
struct ib_srq *s1;
};
struct mlx5_ib_dev {
@ -594,6 +595,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);


@ -1012,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
} else {
in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
in->ctx.rq_type_srqn |=
cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
}
}


@ -302,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
kvfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);


@ -12,7 +12,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.


@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \
mad.o transobj.o vport.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
en_txrx.o


@ -35,7 +35,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include "vport.h"
#include <linux/mlx5/vport.h>
#include "wq.h"
#include "transobj.h"
#include "mlx5_core.h"


@ -543,7 +543,7 @@ static int mlx5e_get_settings(struct net_device *netdev,
u32 eth_proto_oper;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",


@ -722,6 +722,8 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
u8 *dmac;
g = kcalloc(9, sizeof(*g), GFP_KERNEL);
if (!g)
return -ENOMEM;
g[0].log_sz = 2;
g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;


@ -367,7 +367,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
mlx5_fill_page_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_create_rq(mdev, in, inlen, &rq->rqn);
err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
kvfree(in);
@ -395,7 +395,7 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
err = mlx5_modify_rq(mdev, rq->rqn, in, inlen);
err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
kvfree(in);
@ -408,7 +408,7 @@ static void mlx5e_disable_rq(struct mlx5e_rq *rq)
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_destroy_rq(mdev, rq->rqn);
mlx5_core_destroy_rq(mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@ -596,7 +596,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
mlx5_fill_page_array(&sq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);
err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
kvfree(in);
@ -624,7 +624,7 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
err = mlx5_modify_sq(mdev, sq->sqn, in, inlen);
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
kvfree(in);
@ -637,7 +637,7 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_destroy_sq(mdev, sq->sqn);
mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
@ -1115,12 +1115,12 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
MLX5_SET(tisc, tisc, prio, tc);
return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
mlx5_destroy_tis(priv->mdev, priv->tisn[tc]);
mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_open_tises(struct mlx5e_priv *priv)
@ -1326,7 +1326,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
mlx5e_build_tir_ctx(priv, tirc, tt);
err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]);
err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
kvfree(in);
@ -1335,7 +1335,7 @@ static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
mlx5_destroy_tir(priv->mdev, priv->tirn[tt]);
mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_open_tirs(struct mlx5e_priv *priv)
@ -1386,7 +1386,7 @@ int mlx5e_open_locked(struct net_device *netdev)
return err;
}
err = mlx5_query_port_oper_mtu(mdev, &actual_mtu);
err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1);
if (err) {
netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
__func__, err);
@ -1614,7 +1614,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
int max_mtu;
int err = 0;
err = mlx5_query_port_max_mtu(mdev, &max_mtu);
err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
if (err)
return err;
@ -1715,7 +1715,7 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr);
mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)


@ -35,35 +35,64 @@
#include <linux/module.h>
#include "mlx5_core.h"
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out, int outlen)
{
struct mlx5_cmd_query_adapter_mbox_out *out;
struct mlx5_cmd_query_adapter_mbox_in in;
u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
memset(in, 0, sizeof(in));
MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
}
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
out = kzalloc(sizeof(*out), GFP_KERNEL);
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
memset(&in, 0, sizeof(in));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
err = mlx5_cmd_query_adapter(dev, out, outlen);
if (err)
goto out_out;
goto out;
if (out->hdr.status) {
err = mlx5_cmd_status_to_err(&out->hdr);
goto out_out;
}
memcpy(dev->board_id,
MLX5_ADDR_OF(query_adapter_out, out,
query_adapter_struct.vsd_contd_psid),
MLX5_FLD_SZ_BYTES(query_adapter_out,
query_adapter_struct.vsd_contd_psid));
memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
out_out:
out:
kfree(out);
return err;
}
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
err = mlx5_cmd_query_adapter(mdev, out, outlen);
if (err)
goto out;
*vendor_id = MLX5_GET(query_adapter_out, out,
query_adapter_struct.ieee_vendor_id);
out:
kfree(out);
return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;


@ -284,14 +284,6 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
static u16 to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
return 0;
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
{
@ -386,7 +378,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
128);
/* we limit the size of the pkey table to 128 entries for now */
MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
@ -654,7 +646,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
dev->issi = 1;
return 0;
} else if (sup_issi & (1 << 0)) {
} else if (sup_issi & (1 << 0) || !sup_issi) {
return 0;
}
@ -776,9 +768,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_poll;
}
err = mlx5_cmd_query_adapter(dev);
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query adapter failed\n");
dev_err(&pdev->dev, "query board id failed\n");
goto err_stop_poll;
}


@ -78,7 +78,7 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
}
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);


@ -104,13 +104,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask)
int ptys_size, int proto_mask, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
MLX5_SET(ptys_reg, in, local_port, local_port);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
@ -126,7 +126,7 @@ int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
@ -145,7 +145,7 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
if (err)
return err;
@ -158,6 +158,42 @@ int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
if (err)
return err;
*link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, int proto_mask,
u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
if (err)
return err;
if (proto_mask == MLX5_PTYS_EN)
*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
else
*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask)
{
@ -213,7 +249,8 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
}
static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
int *admin_mtu, int *max_mtu, int *oper_mtu)
int *admin_mtu, int *max_mtu, int *oper_mtu,
u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@ -221,7 +258,7 @@ static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
memset(in, 0, sizeof(in));
MLX5_SET(pmtu_reg, in, local_port, 1);
MLX5_SET(pmtu_reg, in, local_port, local_port);
err = mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PMTU, 0, 0);
@ -253,14 +290,47 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
u8 local_port)
{
return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 local_port)
{
return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, local_port);
err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
pvlc_size, MLX5_REG_PVLC, 0, 0);
return err;
}
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
int err;
err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
if (err)
return err;
*vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);


@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_destroy_qp_mbox_in din;
struct mlx5_destroy_qp_mbox_out dout;
int err;
void *qpc;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
if (dev->issi) {
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(qpc, qpc, user_index, 0xffffff);
}
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
mlx5_core_warn(dev, "ret %d\n", err);


@ -37,6 +37,7 @@
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include "transobj.h"
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
static int get_pas_size(void *srqc)
{
u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
return rq_num_pas * sizeof(u64);
}
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
if (srqc_to_rmpc) {
switch (MLX5_GET(srqc, srqc, state)) {
case MLX5_SRQC_STATE_GOOD:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
break;
case MLX5_SRQC_STATE_ERROR:
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
break;
default:
pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
__LINE__, MLX5_GET(srqc, srqc, state));
MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
}
MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
} else {
switch (MLX5_GET(rmpc, rmpc, state)) {
case MLX5_RMPC_STATE_RDY:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
break;
case MLX5_RMPC_STATE_ERR:
MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
break;
default:
pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
__func__, __LINE__,
MLX5_GET(rmpc, rmpc, state));
MLX5_SET(srqc, srqc, state,
MLX5_GET(rmpc, rmpc, state));
}
MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
}
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
struct mlx5_srq_table *table = &dev->priv.srq_table;
@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
}
EXPORT_SYMBOL(mlx5_core_get_srq);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen)
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen)
{
struct mlx5_create_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_destroy_srq_mbox_in din;
struct mlx5_destroy_srq_mbox_out dout;
int err;
memset(&out, 0, sizeof(out));
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
sizeof(out));
srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
return err;
}
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
struct mlx5_destroy_srq_mbox_in in;
struct mlx5_destroy_srq_mbox_out out;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
(u32 *)(&out), sizeof(out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
struct mlx5_arm_srq_mbox_in in;
struct mlx5_arm_srq_mbox_out out;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
in.hdr.opmod = cpu_to_be16(!!is_srq);
in.srqn = cpu_to_be32(srq->srqn);
in.lwm = cpu_to_be16(lwm);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
sizeof(in), (u32 *)(&out),
sizeof(out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
struct mlx5_query_srq_mbox_in in;
memset(&in, 0, sizeof(in));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
(u32 *)out, sizeof(*out));
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in,
int srq_inlen)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
void *create_in;
void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
memcpy(pas, in->pas, pas_size);
/* 0xffffff means we ask to work with cqe version 0 */
MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
sizeof(create_out));
if (err)
goto out;
srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
out:
kvfree(create_in);
return err;
}
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq, u16 lwm)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
u32 *xrcsrq_out;
void *srqc;
void *xrc_srqc;
int err;
xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (!xrcsrq_out)
return -ENOMEM;
memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out,
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
out:
kvfree(xrcsrq_out);
return err;
}
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int srq_inlen)
{
void *create_in;
void *rmpc;
void *srqc;
int pas_size;
int inlen;
int err;
srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
pas_size = get_pas_size(srqc);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
kvfree(create_in);
return err;
}
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
return mlx5_core_destroy_rmp(dev, srq->srqn);
}
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
u16 lwm)
{
void *in;
void *rmpc;
void *wq;
void *bitmask;
int err;
in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
if (!in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
kvfree(in);
return err;
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
u32 *rmp_out;
void *rmpc;
void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
if (!rmp_out)
return -ENOMEM;
err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
if (err)
goto out;
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
rmpc_srqc_reformat(srqc, rmpc, false);
out:
kvfree(rmp_out);
return err;
}
static int create_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in,
int inlen, int is_xrc)
{
if (!dev->issi)
return create_srq_cmd(dev, srq, in, inlen);
else if (srq->common.res == MLX5_RES_XSRQ)
return create_xrc_srq_cmd(dev, srq, in, inlen);
else
return create_rmp_cmd(dev, srq, in, inlen);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
if (!dev->issi)
return destroy_srq_cmd(dev, srq);
else if (srq->common.res == MLX5_RES_XSRQ)
return destroy_xrc_srq_cmd(dev, srq);
else
return destroy_rmp_cmd(dev, srq);
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
err = create_srq_split(dev, srq, in, inlen, is_xrc);
if (err)
return err;
atomic_set(&srq->refcount, 1);
init_completion(&srq->free);
@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_unlock_irq(&table->lock);
if (err) {
mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
goto err_cmd;
goto err_destroy_srq_split;
}
return 0;
err_cmd:
memset(&din, 0, sizeof(din));
memset(&dout, 0, sizeof(dout));
din.srqn = cpu_to_be32(srq->srqn);
din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
err_destroy_srq_split:
destroy_srq_split(dev, srq);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
struct mlx5_destroy_srq_mbox_in in;
struct mlx5_destroy_srq_mbox_out out;
struct mlx5_srq_table *table = &dev->priv.srq_table;
struct mlx5_core_srq *tmp;
int err;
@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return -EINVAL;
}
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
err = destroy_srq_split(dev, srq);
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
if (atomic_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out)
{
struct mlx5_query_srq_mbox_in in;
int err;
memset(&in, 0, sizeof(in));
memset(out, 0, sizeof(*out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
in.srqn = cpu_to_be32(srq->srqn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
if (err)
return err;
if (out->hdr.status)
return mlx5_cmd_status_to_err(&out->hdr);
return err;
if (!dev->issi)
return query_srq_cmd(dev, srq, out);
else if (srq->common.res == MLX5_RES_XSRQ)
return query_xrc_srq_cmd(dev, srq, out);
else
return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
struct mlx5_arm_srq_mbox_in in;
struct mlx5_arm_srq_mbox_out out;
int err;
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
in.hdr.opmod = cpu_to_be16(!!is_srq);
in.srqn = cpu_to_be32(srq->srqn);
in.lwm = cpu_to_be16(lwm);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
return err;
if (!dev->issi)
return arm_srq_cmd(dev, srq, lwm, is_srq);
else if (srq->common.res == MLX5_RES_XSRQ)
return arm_xrc_srq_cmd(dev, srq, lwm);
else
return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);


@ -34,7 +34,7 @@
#include "mlx5_core.h"
#include "transobj.h"
int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
u32 out[MLX5_ST_SZ_DW(create_rq_out)];
int err;
@ -49,7 +49,7 @@ int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
return err;
}
int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
@ -60,7 +60,7 @@ int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
@ -73,7 +73,7 @@ void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
u32 out[MLX5_ST_SZ_DW(create_sq_out)];
int err;
@ -88,7 +88,7 @@ int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
return err;
}
int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
@ -99,7 +99,7 @@ int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
@ -112,7 +112,8 @@ void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)];
int err;
@ -127,7 +128,7 @@ int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
return err;
}
void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
@ -140,7 +141,8 @@ void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)];
int err;
@ -155,7 +157,7 @@ int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
return err;
}
void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
@ -167,3 +169,157 @@ void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn)
{
u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
int err;
MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
if (!err)
*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
return err;
}
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
}
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
memset(in, 0, sizeof(in));
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
memset(in, 0, sizeof(in));
MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
MLX5_SET(query_rmp_in, in, rmpn, rmpn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
}
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
{
void *in;
void *rmpc;
void *wq;
void *bitmask;
int err;
in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
if (!in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
kvfree(in);
return err;
}
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *xsrqn)
{
u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
int err;
MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
if (!err)
*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
return err;
}
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
void *srqc;
void *xrc_srqc;
int err;
memset(in, 0, sizeof(in));
MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out,
MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (!err) {
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
xrc_srq_context_entry);
srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
}
return err;
}
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
{
u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, in, op_mod,
MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
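/* Illustrative only (not part of this change): a minimal sequence a
 * caller could run against the XRC SRQ transport-object wrappers above,
 * assuming 'in' already holds a populated create_xrc_srq mailbox.  The
 * helper name example_xsrq_lifecycle is hypothetical.
 */
static int example_xsrq_lifecycle(struct mlx5_core_dev *dev, u32 *in,
				  int inlen, u16 lwm)
{
	u32 xsrqn;
	int err;

	err = mlx5_core_create_xsrq(dev, in, inlen, &xsrqn);
	if (err)
		return err;

	err = mlx5_core_arm_xsrq(dev, xsrqn, lwm);
	if (err)
		mlx5_core_destroy_xsrq(dev, xsrqn);

	return err;
}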


@ -33,15 +33,30 @@
#ifndef __TRANSOBJ_H__
#define __TRANSOBJ_H__
int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn);
int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn);
int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn);
void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn);
void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqn);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *sqn);
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn);
int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rmpn);
int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
#endif /* __TRANSOBJ_H__ */
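/* Illustrative only: how a consumer of the renamed mlx5_core_* API above
 * might create an RQ from a caller-built command mailbox.  The function
 * example_create_rq and the elided context fill-in are assumptions, not
 * part of this series.
 */
static int example_create_rq(struct mlx5_core_dev *dev, u32 *rqn)
{
	int inlen = MLX5_ST_SZ_BYTES(create_rq_in);
	void *in;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	/* ... set the rq context and wq parameters here ... */

	err = mlx5_core_create_rq(dev, in, inlen, rqn);
	kvfree(in);
	return err;
}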


@ -33,7 +33,7 @@
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include "vport.h"
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
@ -55,8 +55,9 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL(mlx5_query_vport_state);
void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
u32 *out;
@ -82,3 +83,263 @@ void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
kvfree(out);
}
EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 gid_index,
union ib_gid *gid)
{
int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
int is_group_manager;
void *out = NULL;
void *in = NULL;
union ib_gid *tmp;
int tbsz;
int nout;
int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
vf_num, gid_index, tbsz);
if (gid_index > tbsz && gid_index != 0xffff)
return -EINVAL;
if (gid_index == 0xffff)
nout = tbsz;
else
nout = 1;
out_sz += nout * sizeof(*gid);
in = kzalloc(in_sz, GFP_KERNEL);
out = kzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
}
MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
if (other_vport) {
if (is_group_manager) {
MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
} else {
err = -EPERM;
goto out;
}
}
MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
if (err)
goto out;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto out;
tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
gid->global.subnet_prefix = tmp->global.subnet_prefix;
gid->global.interface_id = tmp->global.interface_id;
out:
kfree(in);
kfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 pkey_index,
u16 *pkey)
{
int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
int is_group_manager;
void *out = NULL;
void *in = NULL;
void *pkarr;
int nout;
int tbsz;
int err;
int i;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
if (pkey_index > tbsz && pkey_index != 0xffff)
return -EINVAL;
if (pkey_index == 0xffff)
nout = tbsz;
else
nout = 1;
out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
in = kzalloc(in_sz, GFP_KERNEL);
out = kzalloc(out_sz, GFP_KERNEL);
if (!in || !out) {
err = -ENOMEM;
goto out;
}
MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
if (other_vport) {
if (is_group_manager) {
MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
} else {
err = -EPERM;
goto out;
}
}
MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
if (err)
goto out;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto out;
pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
out:
kfree(in);
kfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
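/* Illustrative only: read P_Key index 0 of the local vport on port 1
 * through the query above.  example_query_default_pkey is a hypothetical
 * helper, not part of this series.
 */
static int example_query_default_pkey(struct mlx5_core_dev *dev, u16 *pkey)
{
	return mlx5_query_hca_vport_pkey(dev, 0, 1, 0, 0, pkey);
}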
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
u16 vf_num,
struct mlx5_hca_vport_context *rep)
{
int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
int is_group_manager;
void *out;
void *ctx;
int err;
is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
if (other_vport) {
if (is_group_manager) {
MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
} else {
err = -EPERM;
goto ex;
}
}
if (MLX5_CAP_GEN(dev, num_ports) == 2)
MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
goto ex;
err = mlx5_cmd_status_to_err_v2(out);
if (err)
goto ex;
ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
port_physical_state);
rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
port_physical_state);
rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
cap_mask1_field_select);
rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
cap_mask2_field_select);
rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
init_type_reply);
rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
subnet_timeout);
rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
qkey_violation_counter);
rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
pkey_violation_counter);
rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
system_image_guid);
ex:
kfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
__be64 *sys_image_guid)
{
struct mlx5_hca_vport_context *rep;
int err;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
if (!err)
*sys_image_guid = rep->sys_image_guid;
kfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid)
{
struct mlx5_hca_vport_context *rep;
int err;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
if (!err)
*node_guid = rep->node_guid;
kfree(rep);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
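/* Illustrative only: fetch the system image GUID and node GUID via the
 * helpers above, e.g. from an IB driver probe path.  The function name
 * example_query_guids is an assumption.
 */
static int example_query_guids(struct mlx5_core_dev *dev,
			       __be64 *sys_image_guid, u64 *node_guid)
{
	int err;

	err = mlx5_query_hca_vport_system_image_guid(dev, sys_image_guid);
	if (err)
		return err;

	return mlx5_query_hca_vport_node_guid(dev, node_guid);
}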


@ -99,6 +99,12 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
#define MLX5_GET64_PR(typ, p, fld) ({ \
u64 ___t = MLX5_GET64(typ, p, fld); \
pr_debug(#fld " = 0x%llx\n", ___t); \
___t; \
})
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@ -1172,4 +1178,11 @@ enum {
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
return 0;
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
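/* For reference (assuming MLX5_MIN_PKEY_TABLE_SIZE == 128 and
 * MLX5_MAX_LOG_PKEY_TABLE == 5, values not shown in this hunk), the
 * mapping above works out to:
 *   pkey_sz 0 -> 128 entries, 1 -> 256, 2 -> 512, 3 -> 1024,
 *   4 -> 2048, 5 -> 4096; anything larger is rejected with 0.
 */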
#endif /* MLX5_DEVICE_H */


@ -107,6 +107,7 @@ enum {
MLX5_REG_PUDE = 0x5009,
MLX5_REG_PMPE = 0x5010,
MLX5_REG_PELC = 0x500e,
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PMLP = 0, /* TBD */
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@ -339,6 +340,8 @@ struct mlx5_core_mr {
enum mlx5_res_type {
MLX5_RES_QP,
MLX5_RES_SRQ,
MLX5_RES_XSRQ,
};
struct mlx5_core_rsc_common {
@ -348,6 +351,7 @@ struct mlx5_core_rsc_common {
};
struct mlx5_core_srq {
struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
int max_gs;
@ -550,6 +554,41 @@ struct mlx5_pas {
u8 log_sz;
};
enum port_state_policy {
MLX5_AAA_000
};
enum phy_port_state {
MLX5_AAA_111
};
struct mlx5_hca_vport_context {
u32 field_select;
bool sm_virt_aware;
bool has_smi;
bool has_raw;
enum port_state_policy policy;
enum phy_port_state phys_state;
enum ib_port_state vport_state;
u8 port_physical_state;
u64 sys_image_guid;
u64 port_guid;
u64 node_guid;
u32 cap_mask1;
u32 cap_mask1_perm;
u32 cap_mask2;
u32 cap_mask2_perm;
u16 lid;
u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
u8 lmc;
u8 subnet_timeout;
u16 sm_lid;
u8 sm_sl;
u16 qkey_violation_counter;
u16 pkey_violation_counter;
bool grh_required;
};
static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
return buf->direct.buf + offset;
@ -640,7 +679,8 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_create_srq_mbox_in *in, int inlen);
struct mlx5_create_srq_mbox_in *in, int inlen,
int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_query_srq_mbox_out *out);
@ -700,11 +740,16 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask);
int ptys_size, int proto_mask, u8 local_port);
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask);
int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
u32 *proto_admin, int proto_mask);
int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
u8 *link_width_oper, u8 local_port);
int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
u8 *proto_oper, int proto_mask,
u8 local_port);
int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
int proto_mask);
int mlx5_set_port_status(struct mlx5_core_dev *dev,
@ -712,8 +757,12 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev,
int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
u8 local_port);
int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
u8 local_port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@ -778,6 +827,7 @@ struct mlx5_interface {
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
struct mlx5_profile {
u64 mask;
@ -788,4 +838,14 @@ struct mlx5_profile {
} mr_cache[MAX_MR_CACHE_ENTRIES];
};
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
pr_warn("gid table length is zero\n");
return 0;
}
return 8 * (1 << param);
}
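/* For reference, the helper above yields 8 * 2^param GID entries:
 * param 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64, 4 -> 128; larger values are
 * rejected with a warning and a table length of 0.
 */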
#endif /* MLX5_DRIVER_H */


@ -2022,12 +2022,9 @@ struct mlx5_ifc_srqc_bits {
u8 reserved_9[0x40];
u8 db_record_addr_h[0x20];
u8 dbr_addr[0x40];
u8 db_record_addr_l[0x1e];
u8 reserved_10[0x2];
u8 reserved_11[0x80];
u8 reserved_10[0x80];
};
enum {
@ -2224,12 +2221,15 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
u8 reserved_1[0x10];
u8 port_state_policy[0x4];
u8 phy_port_state[0x4];
u8 reserved_1[0xc];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
u8 vport_state[0x4];
u8 reserved_2[0x60];
u8 reserved_2[0x20];
u8 system_image_guid[0x40];
u8 port_guid[0x40];
@ -2470,9 +2470,12 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
};
struct mlx5_ifc_query_adapter_param_block_bits {
u8 reserved_0[0xe0];
u8 reserved_0[0xc0];
u8 reserved_1[0x10];
u8 reserved_1[0x8];
u8 ieee_vendor_id[0x18];
u8 reserved_2[0x10];
u8 vsd_vendor_id[0x10];
u8 vsd[208][0x8];
@ -3493,7 +3496,8 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 reserved_2[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
u8 reserved_3[0x10];
@ -3522,7 +3526,8 @@ struct mlx5_ifc_query_hca_vport_gid_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 reserved_2[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
u8 reserved_3[0x10];
@ -3548,7 +3553,8 @@ struct mlx5_ifc_query_hca_vport_context_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 reserved_2[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
u8 reserved_3[0x20];
@ -4167,6 +4173,13 @@ struct mlx5_ifc_modify_rmp_out_bits {
u8 reserved_1[0x40];
};
struct mlx5_ifc_rmp_bitmask_bits {
u8 reserved[0x20];
u8 reserved1[0x1f];
u8 lwm[0x1];
};
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@ -4180,7 +4193,7 @@ struct mlx5_ifc_modify_rmp_in_bits {
u8 reserved_3[0x20];
u8 modify_bitmask[0x40];
struct mlx5_ifc_rmp_bitmask_bits bitmask;
u8 reserved_4[0x40];
@ -4239,7 +4252,8 @@ struct mlx5_ifc_modify_hca_vport_context_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
u8 reserved_2[0xf];
u8 reserved_2[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
u8 reserved_3[0x20];


@ -36,6 +36,20 @@
#include <linux/mlx5/driver.h>
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 gid_index,
union ib_gid *gid);
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 pkey_index,
u16 *pkey);
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
u8 other_vport, u8 port_num,
u16 vf_num,
struct mlx5_hca_vport_context *rep);
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
__be64 *sys_image_guid);
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
u64 *node_guid);
#endif /* __MLX5_VPORT_H__ */