net/mlx5_core: HW data structs/types definitions cleanup
mlx5_ifc.h was heavily modified here since it is now generated by a script from the device specification (PRM rev 0.25). This specification is backward compatible with existing hardware. Some structures/fields were added here in order to enable the Ethernet functionality of the driver.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e281682bf2 (parent db058a186f)
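The "generated by a script" note above refers to the mlx5_ifc.h convention in which every PRM structure is described by a *_bits layout whose u8 array sizes are field widths in bits (each array element stands for one bit of the device layout), and driver code then reads and writes fields by name through the MLX5_GET()/MLX5_SET() accessors that appear later in this diff. The fragment below only illustrates that style; the struct and field names are invented for this note and are not taken from the real header.

    #include <linux/types.h>

    /* Illustrative only: a made-up layout in the mlx5_ifc.h "_bits" style.
     * Array sizes are field widths in bits, so this describes a 32-bit word
     * with a 16-bit reserved area followed by two 8-bit fields.
     */
    struct mlx5_ifc_example_cap_bits {
            u8      reserved_0[0x10];
            u8      log_max_example_sz[0x8];
            u8      log_max_example[0x8];
    };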
@@ -390,8 +390,17 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_ARM_RQ:
 		return "ARM_RQ";
 
-	case MLX5_CMD_OP_RESIZE_SRQ:
-		return "RESIZE_SRQ";
+	case MLX5_CMD_OP_CREATE_XRC_SRQ:
+		return "CREATE_XRC_SRQ";
+
+	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+		return "DESTROY_XRC_SRQ";
+
+	case MLX5_CMD_OP_QUERY_XRC_SRQ:
+		return "QUERY_XRC_SRQ";
+
+	case MLX5_CMD_OP_ARM_XRC_SRQ:
+		return "ARM_XRC_SRQ";
 
 	case MLX5_CMD_OP_ALLOC_PD:
 		return "ALLOC_PD";
@@ -408,8 +417,8 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_ATTACH_TO_MCG:
 		return "ATTACH_TO_MCG";
 
-	case MLX5_CMD_OP_DETACH_FROM_MCG:
-		return "DETACH_FROM_MCG";
+	case MLX5_CMD_OP_DETTACH_FROM_MCG:
+		return "DETTACH_FROM_MCG";
 
 	case MLX5_CMD_OP_ALLOC_XRCD:
 		return "ALLOC_XRCD";
@@ -95,7 +95,7 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
 		goto out;
 	}
 
-	memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
+	memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability),
 	       sizeof(*caps));
 
 	mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
@@ -319,8 +319,7 @@ static void fw2drv_caps(struct mlx5_caps *caps, void *out)
 	gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
 	gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
 	gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
-	gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
-	gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
+	gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srq);
 	gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
 	gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
 	gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
@@ -391,7 +390,7 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
 		goto query_ex;
 	}
 	mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
-	fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+	fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability));
 
 query_ex:
 	kfree(out);
@@ -453,7 +452,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	/* disable checksum */
 	cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
 
-	copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
+	copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability),
 		       cur_caps);
 	err = set_caps(dev, set_ctx, set_sz);
 
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
 
 	memset(&in, 0, sizeof(in));
 	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
 	memcpy(in.gid, mgid, sizeof(*mgid));
 	in.qpn = cpu_to_be32(qpn);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -223,3 +223,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 
 	return 0;
 }
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+	phys_addr_t pfn;
+	phys_addr_t uar_bar_start;
+	int err;
+
+	err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+		return err;
+	}
+
+	uar_bar_start = pci_resource_start(mdev->pdev, 0);
+	pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+	uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	if (!uar->map) {
+		mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+		err = -ENOMEM;
+		goto err_free_uar;
+	}
+
+	return 0;
+
+err_free_uar:
+	mlx5_cmd_free_uar(mdev, uar->index);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+	iounmap(uar->map);
+	mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
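A hedged sketch of how a caller might pair the two helpers added above; the wrapper function is illustrative and not part of the commit. mlx5_alloc_map_uar() asks firmware for a UAR index and ioremaps the corresponding page of BAR 0, and mlx5_unmap_free_uar() undoes both steps.

    #include <linux/mlx5/driver.h>

    /* Illustrative only: allocate and map a UAR, use it, then release it. */
    static int example_use_uar(struct mlx5_core_dev *mdev)
    {
            struct mlx5_uar uar;
            int err;

            err = mlx5_alloc_map_uar(mdev, &uar);
            if (err)
                    return err;

            /* ... ring doorbells through uar.map ... */

            mlx5_unmap_free_uar(mdev, &uar);
            return 0;
    }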
@@ -35,6 +35,7 @@
 
 #include <linux/types.h>
 #include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
 
 #if defined(__LITTLE_ENDIAN)
 #define MLX5_SET_HOST_ENDIANNESS	0
@@ -70,6 +71,14 @@
 		     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)
 
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+		     << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
 __mlx5_mask(typ, fld))
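A hedged usage sketch of the accessors above (the helper is illustrative and not part of this commit; the layout names query_hca_cap_out, capability and cmd_hca_cap are the ones already used by the capability-query hunks earlier in this diff). MLX5_GET() extracts a named field from a big-endian mailbox buffer, MLX5_ADDR_OF() returns a pointer to a named sub-structure, and the new MLX5_SET_TO_ONES() fills a named field with all ones, i.e. its full mask.

    #include <linux/mlx5/device.h>  /* pulls in mlx5_ifc.h after this commit */

    /* Illustrative only: read one HCA capability field out of a
     * QUERY_HCA_CAP output mailbox using the generated layout names.
     */
    static u8 example_read_log_max_qp(void *query_out)
    {
            void *cap = MLX5_ADDR_OF(query_hca_cap_out, query_out, capability);

            return MLX5_GET(cmd_hca_cap, cap, log_max_qp);
    }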
@@ -264,6 +273,7 @@ enum {
 	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
 	MLX5_OPCODE_SEND		= 0x0a,
 	MLX5_OPCODE_SEND_IMM		= 0x0b,
+	MLX5_OPCODE_LSO			= 0x0e,
 	MLX5_OPCODE_RDMA_READ		= 0x10,
 	MLX5_OPCODE_ATOMIC_CS		= 0x11,
 	MLX5_OPCODE_ATOMIC_FA		= 0x12,
@@ -541,6 +551,10 @@ struct mlx5_cmd_prot_block {
 	u8		sig;
 };
 
+enum {
+	MLX5_CQE_SYND_FLUSHED_IN_ERROR	= 5,
+};
+
 struct mlx5_err_cqe {
 	u8	rsvd0[32];
 	__be32	srqn;
@@ -554,13 +568,22 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-	u8		rsvd0[17];
+	u8		rsvd0[4];
+	u8		lro_tcppsh_abort_dupack;
+	u8		lro_min_ttl;
+	__be16		lro_tcp_win;
+	__be32		lro_ack_seq_num;
+	__be32		rss_hash_result;
+	u8		rss_hash_type;
 	u8		ml_path;
-	u8		rsvd20[4];
+	u8		rsvd20[2];
+	__be16		check_sum;
 	__be16		slid;
 	__be32		flags_rqpn;
-	u8		rsvd28[4];
-	__be32		srqn;
+	u8		hds_ip_ext;
+	u8		l4_hdr_type_etc;
+	__be16		vlan_info;
+	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
 	__be32		imm_inval_pkey;
 	u8		rsvd40[4];
 	__be32		byte_cnt;
@@ -571,6 +594,40 @@ struct mlx5_cqe64 {
 	u8		op_own;
 };
 
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+	return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+	CQE_L4_HDR_TYPE_NONE			= 0x0,
+	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
+	CQE_L4_HDR_TYPE_UDP			= 0x2,
+	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
+	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
+};
+
+enum {
+	CQE_RSS_HTYPE_IP	= 0x3 << 6,
+	CQE_RSS_HTYPE_L4	= 0x3 << 2,
+};
+
+enum {
+	CQE_L2_OK	= 1 << 0,
+	CQE_L3_OK	= 1 << 1,
+	CQE_L4_OK	= 1 << 2,
+};
+
 struct mlx5_sig_err_cqe {
 	u8		rsvd0[16];
 	__be32		expected_trans_sig;
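A hedged sketch of how receive-completion handling might consume the new helpers and L4-type codes; the function is illustrative and not part of this commit.

    #include <linux/mlx5/device.h>

    /* Illustrative only: true when the completed packet carried TCP,
     * using get_cqe_l4_hdr_type() and the CQE_L4_HDR_TYPE_* codes above.
     */
    static bool example_cqe_is_tcp(struct mlx5_cqe64 *cqe)
    {
            u8 l4_type = get_cqe_l4_hdr_type(cqe);

            return l4_type == CQE_L4_HDR_TYPE_TCP_NO_ACK ||
                   l4_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA ||
                   l4_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA;
    }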
@@ -996,4 +1053,52 @@ struct mlx5_destroy_psv_out {
 	u8			rsvd[8];
 };
 
+#define MLX5_CMD_OP_MAX	0x920
+
+enum {
+	VPORT_STATE_DOWN		= 0x0,
+	VPORT_STATE_UP			= 0x1,
+};
+
+enum {
+	MLX5_L3_PROT_TYPE_IPV4		= 0,
+	MLX5_L3_PROT_TYPE_IPV6		= 1,
+};
+
+enum {
+	MLX5_L4_PROT_TYPE_TCP		= 0,
+	MLX5_L4_PROT_TYPE_UDP		= 1,
+};
+
+enum {
+	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
+	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
+	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
+	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
+	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
+};
+
+enum {
+	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
+	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
+	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
+
+};
+
+enum {
+	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
+	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
+};
+
+enum {
+	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
+};
+
+enum {
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE	= 0x0,
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM		= 0x1,
+};
+
 #endif /* MLX5_DEVICE_H */
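One hedged example of how the new hash-field selector bits compose; the helper is illustrative and not from this commit. A TCP/IPv4 RSS configuration would typically hash on both IP addresses and both L4 ports.

    #include <linux/mlx5/device.h>

    /* Illustrative only: hash-field selection for TCP/IPv4 RSS. */
    static u32 example_tcp_ipv4_rx_hash_fields(void)
    {
            return MLX5_HASH_FIELD_SEL_SRC_IP |
                   MLX5_HASH_FIELD_SEL_DST_IP |
                   MLX5_HASH_FIELD_SEL_L4_SPORT |
                   MLX5_HASH_FIELD_SEL_L4_DPORT;
    }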
@@ -44,7 +44,6 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
 
 enum {
 	MLX5_BOARD_ID_LEN = 64,
@@ -278,7 +277,6 @@ struct mlx5_general_caps {
 	u8	log_max_mkey;
 	u8	log_max_pd;
 	u8	log_max_srq;
-	u8	log_max_strq;
 	u8	log_max_mrw_sz;
 	u8	log_max_bsf_list_size;
 	u8	log_max_klm_list_size;
@@ -664,6 +662,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(void);
 void __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
[File diff suppressed because it is too large.]
@@ -134,13 +134,21 @@ enum {
 
 enum {
 	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
+	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
 	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
 };
 
 enum {
+	MLX5_SEND_WQE_DS	= 16,
 	MLX5_SEND_WQE_BB	= 64,
 };
 
+#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
+};
+
 enum {
 	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
 	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
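The arithmetic these constants encode, as a hedged sketch (the helper is illustrative): a send WQE basic block is 64 bytes and a data segment is 16 bytes, so MLX5_SEND_WQEBB_NUM_DS evaluates to 4 and a WQE of ds_cnt data segments spans the number of basic blocks computed below.

    #include <linux/kernel.h>       /* DIV_ROUND_UP */
    #include <linux/mlx5/qp.h>

    /* Illustrative only: number of 64-byte WQEBBs needed for ds_cnt
     * 16-byte data segments.
     */
    static inline u16 example_wqebb_cnt(u16 ds_cnt)
    {
            return DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
    }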
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
 
+enum {
+	MLX5_ETH_WQE_L3_INNER_CSUM	= 1 << 4,
+	MLX5_ETH_WQE_L4_INNER_CSUM	= 1 << 5,
+	MLX5_ETH_WQE_L3_CSUM		= 1 << 6,
+	MLX5_ETH_WQE_L4_CSUM		= 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+	u8		rsvd0[4];
+	u8		cs_flags;
+	u8		rsvd1;
+	__be16		mss;
+	__be32		rsvd2;
+	__be16		inline_hdr_sz;
+	u8		inline_hdr_start[2];
+};
+
 struct mlx5_wqe_xrc_seg {
 	__be32		xrc_srqn;
 	u8		rsvd[12];
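A hedged sketch of filling the new Ethernet send segment for a packet that wants IP and L4 checksum offload; the helper is illustrative and not part of this commit.

    #include <asm/byteorder.h>
    #include <linux/mlx5/qp.h>

    /* Illustrative only: request L3/L4 checksum offload and set the MSS
     * in the new Ethernet send segment.
     */
    static void example_fill_eth_seg(struct mlx5_wqe_eth_seg *eseg, u16 mss)
    {
            eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
            eseg->mss = cpu_to_be16(mss);
    }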