Merge of the qedr RoCE driver
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma qedr RoCE driver from Doug Ledford:
 "Early on in the merge window I mentioned I had a backlog of new
  drivers waiting to be reviewed and that, in addition to the hns-roce
  driver, I wanted to get possibly a couple more reviewed. I ended up
  only having the time to complete one of the additional drivers.

  During Dave Miller's pull request this go around, there were a series
  of 9 patches to the QLogic qed net driver that add basic support for
  a paired RoCE driver. That support is currently not functional
  because it is missing the matching RoCE driver in the RDMA subsystem.
  I managed to finish that review. However, because it goes against
  part of Dave's net pull, and a part that was accepted a day or two
  after the merge window opened, to apply cleanly it has to be applied
  to either the tip of Dave's net branch, or as I did in this case, I
  just applied it to your master after you had taken Dave's pull
  request."

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  qedr: Add events support and register IB device
  qedr: Add GSI support
  qedr: Add LL2 RoCE interface
  qedr: Add support for data path
  qedr: Add support for memory registeration verbs
  qedr: Add support for QP verbs
  qedr: Add support for PD,PKEY and CQ verbs
  qedr: Add support for user context verbs
  qedr: Add support for RoCE HW init
  qedr: Add RoCE driver framework
commit ac9ef8cd07
drivers/infiniband/Kconfig
@@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
 
 source "drivers/infiniband/hw/hfi1/Kconfig"
 
+source "drivers/infiniband/hw/qedr/Kconfig"
+
 endif # INFINIBAND
drivers/infiniband/hw/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA)	+= ocrdma/
 obj-$(CONFIG_INFINIBAND_USNIC)		+= usnic/
 obj-$(CONFIG_INFINIBAND_HFI1)		+= hfi1/
 obj-$(CONFIG_INFINIBAND_HNS)		+= hns/
+obj-$(CONFIG_INFINIBAND_QEDR)		+= qedr/
drivers/infiniband/hw/qedr/Kconfig (new file, 7 lines)
@@ -0,0 +1,7 @@
config INFINIBAND_QEDR
	tristate "QLogic RoCE driver"
	depends on 64BIT && QEDE
	select QED_LL2
	---help---
	  This driver provides low-level InfiniBand over Ethernet
	  support for QLogic QED host channel adapters (HCAs).
drivers/infiniband/hw/qedr/Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o

qedr-y := main.o verbs.o qedr_cm.o
drivers/infiniband/hw/qedr/main.c (new file, 914 lines)
@@ -0,0 +1,914 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/addrconf.h>
#include <linux/qed/qede_roce.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);

#define QEDR_WQ_MULTIPLIER_DFT	(3)

void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
				size_t str_len)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, str_len, "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
	struct qedr_dev *qdev;

	qdev = get_qedr_dev(dev);
	dev_hold(qdev->ndev);

	/* The HW vendor's device driver must guarantee
	 * that this function returns NULL before the net device reaches
	 * NETDEV_UNREGISTER_FINAL state.
	 */
	return qdev->ndev;
}

static int qedr_register_device(struct qedr_dev *dev)
{
	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

	dev->ibdev.node_guid = dev->attr.node_guid;
	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	dev->ibdev.query_device = qedr_query_device;
	dev->ibdev.query_port = qedr_query_port;
	dev->ibdev.modify_port = qedr_modify_port;

	dev->ibdev.query_gid = qedr_query_gid;
	dev->ibdev.add_gid = qedr_add_gid;
	dev->ibdev.del_gid = qedr_del_gid;

	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.create_qp = qedr_create_qp;
	dev->ibdev.modify_qp = qedr_modify_qp;
	dev->ibdev.query_qp = qedr_query_qp;
	dev->ibdev.destroy_qp = qedr_destroy_qp;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.create_ah = qedr_create_ah;
	dev->ibdev.destroy_ah = qedr_destroy_ah;

	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
	dev->ibdev.dereg_mr = qedr_dereg_mr;
	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
	dev->ibdev.alloc_mr = qedr_alloc_mr;
	dev->ibdev.map_mr_sg = qedr_map_mr_sg;

	dev->ibdev.poll_cq = qedr_poll_cq;
	dev->ibdev.post_send = qedr_post_send;
	dev->ibdev.post_recv = qedr_post_recv;

	dev->ibdev.process_mad = qedr_process_mad;
	dev->ibdev.get_port_immutable = qedr_port_immutable;
	dev->ibdev.get_netdev = qedr_get_netdev;

	dev->ibdev.dma_device = &dev->pdev->dev;

	dev->ibdev.get_link_layer = qedr_link_layer;
	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

	return ib_register_device(&dev->ibdev, NULL);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				QEDR_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}

static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 val;

	dev->atomic_cap = IB_ATOMIC_NONE;

	bridge = pdev->bus->self;
	if (!bridge)
		return;

	/* Check whether we are connected directly or via a switch */
	while (bridge && bridge->bus->parent) {
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
			 bridge->bus->number, bridge->bus->primary);
		/* Need to check Atomic Op Routing Supported all the way to
		 * root complex.
		 */
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
		if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
			pcie_capability_clear_word(pdev,
						   PCI_EXP_DEVCTL2,
						   PCI_EXP_DEVCTL2_ATOMIC_REQ);
			return;
		}
		bridge = bridge->bus->parent->self;
	}
	bridge = pdev->bus->self;

	/* according to bridge capability */
	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
	if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ATOMIC_REQ);
		dev->atomic_cap = IB_ATOMIC_GLOB;
	} else {
		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ATOMIC_REQ);
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))

static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;

	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

void qedr_unaffiliated_event(void *context,
			     u8 event_code)
{
	pr_err("unaffiliated event not implemented yet\n");
}

void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED	0
#define EVENT_TYPE_CQ		1
#define EVENT_TYPE_QP		2
	struct qedr_dev *dev = (struct qedr_dev *)context;
	union event_ring_data *data = fw_handle;
	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
			    data->roce_handle.lo;
	u8 event_type = EVENT_TYPE_NOT_DEFINED;
	struct ib_event event;
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct qedr_cq *cq;
	struct qedr_qp *qp;

	switch (e_code) {
	case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
		event.event = IB_EVENT_CQ_ERR;
		event_type = EVENT_TYPE_CQ;
		break;
	case ROCE_ASYNC_EVENT_SQ_DRAINED:
		event.event = IB_EVENT_SQ_DRAINED;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
		event.event = IB_EVENT_QP_FATAL;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
		event.event = IB_EVENT_QP_REQ_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		event_type = EVENT_TYPE_QP;
		break;
	default:
		DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
		       roce_handle64);
	}

	switch (event_type) {
	case EVENT_TYPE_CQ:
		cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
		if (cq) {
			ibcq = &cq->ibcq;
			if (ibcq->event_handler) {
				event.device = ibcq->device;
				event.element.cq = ibcq;
				ibcq->event_handler(&event, ibcq->cq_context);
			}
		} else {
			WARN(1,
			     "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
		break;
	case EVENT_TYPE_QP:
		qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
		if (qp) {
			ibqp = &qp->ibqp;
			if (ibqp->event_handler) {
				event.device = ibqp->device;
				event.element.qp = ibqp;
				ibqp->event_handler(&event, ibqp->qp_context);
			}
		} else {
			WARN(1,
			     "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
			     roce_handle64);
		}
		DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
		break;
	default:
		break;
	}
}

static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.affiliated_event = qedr_affiliated_event;
	events.unaffiliated_event = qedr_unaffiliated_event;
	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to allocate register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto sysfs_err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

sysfs_err:
	ib_unregister_device(&dev->ibdev);
reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	ib_dealloc_device(&dev->ibdev);
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	qedr_remove_sysfiles(dev);
	ib_unregister_device(&dev->ibdev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static int qedr_close(struct qedr_dev *dev)
{
	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);

	return 0;
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
					       dev->gsi_ll2_mac_address,
					       dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* event handling via NIC driver ensures that all the NIC specific
 * initialization is done before the RoCE driver notifies the
 * event to the stack.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_roce_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_roce_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);
drivers/infiniband/hw/qedr/qedr.h (new file, 495 lines)
@@ -0,0 +1,495 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QEDR_H__
#define __QEDR_H__

#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_roce_if.h>
#include <linux/qed/qede_roce.h>
#include "qedr_hsi.h"

#define QEDR_MODULE_VERSION	"8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(dev) ((dev)->ibdev.name)

#define DP_DEBUG(dev, module, fmt, ...)					\
	pr_debug("(%s) " module ": " fmt,				\
		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)

#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ   " CQ"
#define QEDR_MSG_MR   " MR"
#define QEDR_MSG_RQ   " RQ"
#define QEDR_MSG_SQ   " SQ"
#define QEDR_MSG_QP   " QP"
#define QEDR_MSG_GSI  " GSI"

#define QEDR_CQ_MAGIC_NUMBER	(0x11223344)

struct qedr_dev;

struct qedr_cnq {
	struct qedr_dev *dev;
	struct qed_chain pbl;
	struct qed_sb_info *sb;
	char name[32];
	u64 n_comp;
	__le16 *hw_cons_ptr;
	u8 index;
};

#define QEDR_MAX_SGID 128

struct qedr_device_attr {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;
	u64 node_guid;
	u64 sys_image_guid;
	u8 max_cnq;
	u8 max_sge;
	u16 max_inline;
	u32 max_sqe;
	u32 max_rqe;
	u8 max_qp_resp_rd_atomic_resc;
	u8 max_qp_req_rd_atomic_resc;
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_fmr;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u32 max_srq;
	u32 max_srq_wr;
	u8 max_srq_sge;
	u8 max_stats_queues;
	u32 dev_caps;

	u64 page_size_caps;
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;
};

struct qedr_dev {
	struct ib_device ibdev;
	struct qed_dev *cdev;
	struct pci_dev *pdev;
	struct net_device *ndev;

	enum ib_atomic_cap atomic_cap;

	void *rdma_ctx;
	struct qedr_device_attr attr;

	const struct qed_rdma_ops *ops;
	struct qed_int_info int_info;

	struct qed_sb_info *sb_array;
	struct qedr_cnq *cnq_array;
	int num_cnq;
	int sb_start;

	void __iomem *db_addr;
	u64 db_phys_addr;
	u32 db_size;
	u16 dpi;

	union ib_gid *sgid_tbl;

	/* Lock for sgid table */
	spinlock_t sgid_lock;

	u64 guid;

	u32 dp_module;
	u8 dp_level;
	u8 num_hwfns;
	uint wq_multiplier;
	u8 gsi_ll2_mac_address[ETH_ALEN];
	int gsi_qp_created;
	struct qedr_cq *gsi_sqcq;
	struct qedr_cq *gsi_rqcq;
	struct qedr_qp *gsi_qp;
};

#define QEDR_MAX_SQ_PBL			(0x8000)
#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_SQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QEDR_MAX_RQ_PBL			(0x2000)
#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_RQE_ELEMENT_SIZE)
#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_RQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))

#define QEDR_CQE_SIZE	(sizeof(union rdma_cqe))
#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
				  sizeof(u64)) - 1)
#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
			     (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))

#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

#define QEDR_MAX_PORT			(1)

#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define QEDR_ROCE_PKEY_MAX 1
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff

struct qedr_pbl {
	struct list_head list_entry;
	void *va;
	dma_addr_t pa;
};

struct qedr_ucontext {
	struct ib_ucontext ibucontext;
	struct qedr_dev *dev;
	struct qedr_pd *pd;
	u64 dpi_addr;
	u64 dpi_phys_addr;
	u32 dpi_size;
	u16 dpi;

	struct list_head mm_head;

	/* Lock to protect mm list */
	struct mutex mm_list_lock;
};

union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};

enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,
	QEDR_CQ_TYPE_KERNEL,
	QEDR_CQ_TYPE_USER,
};

struct qedr_pbl_info {
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	bool two_layered;
};

struct qedr_userq {
	struct ib_umem *umem;
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;
	size_t buf_len;
};

struct qedr_cq {
	struct ib_cq ibcq;

	enum qedr_cq_type cq_type;
	u32 sig;

	u16 icid;

	/* Lock to protect completion handler */
	spinlock_t comp_handler_lock;

	/* Lock to protect multiple CQs */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;

	void __iomem *db_addr;
	union db_prod64 db;

	u8 pbl_toggle;
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;

	u32 cq_cons;

	struct qedr_userq q;
};

struct qedr_pd {
	struct ib_pd ibpd;
	u32 pd_id;
	struct qedr_ucontext *uctx;
};

struct qedr_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};

union db_prod32 {
	struct rdma_pwm_val16_data data;
	u32 raw;
};

struct qedr_qp_hwq_info {
	/* WQE Elements */
	struct qed_chain pbl;
	u64 p_phys_addr_tbl;
	u32 max_sges;

	/* WQE */
	u16 prod;
	u16 cons;
	u16 wqe_cons;
	u16 gsi_cons;
	u16 max_wr;

	/* DB */
	void __iomem *db;
	union db_prod32 db_data;
};

#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		p_info->index = (p_info->index + 1) &			\
				qed_chain_get_capacity(p_info->pbl)	\
	} while (0)

enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL = 1,
	QEDR_QP_ERR_RQ_FULL = 2,
	QEDR_QP_ERR_BAD_SR = 4,
	QEDR_QP_ERR_BAD_RR = 8,
	QEDR_QP_ERR_SQ_PBL_FULL = 16,
	QEDR_QP_ERR_RQ_PBL_FULL = 32,
};

struct qedr_qp {
	struct ib_qp ibqp;	/* must be first */
	struct qedr_dev *dev;

	struct qedr_qp_hwq_info sq;
	struct qedr_qp_hwq_info rq;

	u32 max_inline_data;

	/* Lock for QP's */
	spinlock_t q_lock;
	struct qedr_cq *sq_cq;
	struct qedr_cq *rq_cq;
	struct qedr_srq *srq;
	enum qed_roce_qp_state state;
	u32 id;
	struct qedr_pd *pd;
	enum ib_qp_type qp_type;
	struct qed_rdma_qp *qed_qp;
	u32 qp_id;
	u16 icid;
	u16 mtu;
	int sgid_idx;
	u32 rq_psn;
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;

	/* Relevant to qps created from kernel space only (ULPs) */
	u8 prev_wqe_size;
	u16 wqe_cons;
	u32 err_bitmap;
	bool signaled;

	/* SQ shadow */
	struct {
		u64 wr_id;
		enum ib_wc_opcode opcode;
		u32 bytes_len;
		u8 wqe_size;
		bool signaled;
		dma_addr_t icrc_mapping;
		u32 *icrc;
		struct qedr_mr *mr;
	} *wqe_wr_id;

	/* RQ shadow */
	struct {
		u64 wr_id;
		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
		u8 wqe_size;

		u8 smac[ETH_ALEN];
		u16 vlan_id;
		int rc;
	} *rqe_wr_id;

	/* Relevant to qps created from user space only (applications) */
	struct qedr_userq usq;
	struct qedr_userq urq;
};

struct qedr_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

enum qedr_mr_type {
	QEDR_MR_USER,
	QEDR_MR_KERNEL,
	QEDR_MR_DMA,
	QEDR_MR_FRMR,
};

struct mr_info {
	struct qedr_pbl *pbl_table;
	struct qedr_pbl_info pbl_info;
	struct list_head free_pbl_list;
	struct list_head inuse_pbl_list;
	u32 completed;
	u32 completed_handled;
};

struct qedr_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;

	struct qed_rdma_register_tid_in_params hw_mr;
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;

	u64 *pages;
	u32 npages;
};

#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))

#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)

static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
{
	info->cons = (info->cons + 1) % info->max_wr;
	info->wqe_cons++;
}

static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
{
	info->prod = (info->prod + 1) % info->max_wr;
}

static inline int qedr_get_dmac(struct qedr_dev *dev,
				struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
	union ib_gid zero_sgid = { { 0 } };
	struct in6_addr in6;

	if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
		DP_ERR(dev, "Local port GID not supported\n");
		eth_zero_addr(mac_addr);
		return -EINVAL;
	}

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	ether_addr_copy(mac_addr, ah_attr->dmac);

	return 0;
}

static inline
struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct qedr_ucontext, ibucontext);
}

static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct qedr_dev, ibdev);
}

static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qedr_pd, ibpd);
}

static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qedr_cq, ibcq);
}

static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct qedr_qp, ibqp);
}

static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct qedr_ah, ibah);
}

static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct qedr_mr, ibmr);
}
#endif
drivers/infiniband/hw/qedr/qedr_cm.c (new file, 622 lines; listing truncated below)
@@ -0,0 +1,622 @@
|
||||
/* QLogic qedr NIC Driver
|
||||
* Copyright (c) 2015-2016 QLogic Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and /or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/udp.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
#include <rdma/iw_cm.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_addr.h>
|
||||
#include <rdma/ib_cache.h>
|
||||
|
||||
#include "qedr_hsi.h"
|
||||
#include <linux/qed/qed_if.h>
|
||||
#include <linux/qed/qed_roce_if.h>
|
||||
#include "qedr.h"
|
||||
#include "qedr_hsi.h"
|
||||
#include "verbs.h"
|
||||
#include <rdma/qedr-abi.h>
|
||||
#include "qedr_hsi.h"
|
||||
#include "qedr_cm.h"
|
||||
|
||||
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
|
||||
{
|
||||
info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
|
||||
}
|
||||
|
||||
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
dev->gsi_qp_created = 1;
|
||||
dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
|
||||
dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
|
||||
dev->gsi_qp = qp;
|
||||
}
|
||||
|
||||
void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
|
||||
{
|
||||
struct qedr_dev *dev = (struct qedr_dev *)_qdev;
|
||||
struct qedr_cq *cq = dev->gsi_sqcq;
|
||||
struct qedr_qp *qp = dev->gsi_qp;
|
||||
unsigned long flags;
|
||||
|
||||
DP_DEBUG(dev, QEDR_MSG_GSI,
|
||||
"LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
|
||||
dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
|
||||
cq->ibcq.comp_handler ? "Yes" : "No");
|
||||
|
||||
dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
|
||||
pkt->header.baddr);
|
||||
kfree(pkt);
|
||||
|
||||
spin_lock_irqsave(&qp->q_lock, flags);
|
||||
qedr_inc_sw_gsi_cons(&qp->sq);
|
||||
spin_unlock_irqrestore(&qp->q_lock, flags);
|
||||
|
||||
if (cq->ibcq.comp_handler) {
|
||||
spin_lock_irqsave(&cq->comp_handler_lock, flags);
|
||||
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
|
||||
struct qed_roce_ll2_rx_params *params)
|
||||
{
|
||||
struct qedr_dev *dev = (struct qedr_dev *)_dev;
|
||||
struct qedr_cq *cq = dev->gsi_rqcq;
|
||||
struct qedr_qp *qp = dev->gsi_qp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->q_lock, flags);
|
||||
|
||||
qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
|
||||
qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
|
||||
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
|
||||
ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
|
||||
|
||||
qedr_inc_sw_gsi_cons(&qp->rq);
|
||||
|
||||
spin_unlock_irqrestore(&qp->q_lock, flags);
|
||||
|
||||
if (cq->ibcq.comp_handler) {
|
||||
spin_lock_irqsave(&cq->comp_handler_lock, flags);
|
||||
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
struct qed_rdma_destroy_cq_in_params iparams;
|
||||
struct qed_rdma_destroy_cq_out_params oparams;
|
||||
struct qedr_cq *cq;
|
||||
|
||||
cq = get_qedr_cq(attrs->send_cq);
|
||||
iparams.icid = cq->icid;
|
||||
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
|
||||
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
|
||||
|
||||
cq = get_qedr_cq(attrs->recv_cq);
|
||||
/* if a dedicated recv_cq was used, delete it too */
|
||||
if (iparams.icid != cq->icid) {
|
||||
iparams.icid = cq->icid;
|
||||
dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
|
||||
dev->ops->common->chain_free(dev->cdev, &cq->pbl);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
|
||||
struct ib_qp_init_attr *attrs)
|
||||
{
|
||||
if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
|
||||
DP_ERR(dev,
|
||||
" create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
|
||||
attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
|
||||
DP_ERR(dev,
|
||||
" create gsi qp: failed. max_recv_wr is too large %d>%d\n",
|
||||
attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
|
||||
DP_ERR(dev,
|
||||
" create gsi qp: failed. max_send_wr is too large %d>%d\n",
|
||||
attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
|
||||
struct ib_qp_init_attr *attrs,
|
||||
struct qedr_qp *qp)
|
||||
{
|
||||
struct qed_roce_ll2_params ll2_params;
|
||||
int rc;
|
||||
|
||||
rc = qedr_check_gsi_qp_attrs(dev, attrs);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
|
||||
/* configure and start LL2 */
|
||||
memset(&ll2_params, 0, sizeof(ll2_params));
|
||||
ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
|
||||
ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
|
||||
ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
|
||||
ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
|
||||
ll2_params.cb_cookie = (void *)dev;
|
||||
ll2_params.mtu = dev->ndev->mtu;
|
||||
ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
|
||||
rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
|
||||
if (rc) {
|
||||
DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
|
||||
/* create QP */
|
||||
qp->ibqp.qp_num = 1;
|
||||
qp->rq.max_wr = attrs->cap.max_recv_wr;
|
||||
qp->sq.max_wr = attrs->cap.max_send_wr;
|
||||
|
||||
qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->rqe_wr_id)
|
||||
goto err;
|
||||
qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
|
||||
GFP_KERNEL);
|
||||
if (!qp->wqe_wr_id)
|
||||
goto err;
|
||||
|
||||
qedr_store_gsi_qp_cq(dev, qp, attrs);
|
||||
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
|
||||
|
||||
/* the GSI CQ is handled by the driver so remove it from the FW */
|
||||
qedr_destroy_gsi_cq(dev, attrs);
|
||||
dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
|
||||
dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
|
||||

	DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

	return &qp->ibqp;

err:
	kfree(qp->rqe_wr_id);

	rc = dev->ops->roce_ll2_stop(dev->cdev);
	if (rc)
		DP_ERR(dev, "create gsi qp: failed destroy on create\n");

	return ERR_PTR(-ENOMEM);
}

int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	int rc;

	rc = dev->ops->roce_ll2_stop(dev->cdev);
	if (rc)
		DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
	else
		DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");

	return rc;
}

#define QEDR_MAX_UD_HEADER_SIZE	(100)
#define QEDR_GSI_QPN		(1)
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct ib_ud_header *udh,
					int *roce_mode)
{
	bool has_vlan = false, has_grh_ipv6 = true;
	struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
	struct ib_global_route *grh = &ah_attr->grh;
	union ib_gid sgid;
	int send_size = 0;
	u16 vlan_id = 0;
	u16 ether_type;
	struct ib_gid_attr sgid_attr;
	int rc;
	int ip_ver = 0;

	bool has_udp = false;
	int i;

	send_size = 0;
	for (i = 0; i < swr->num_sge; ++i)
		send_size += swr->sg_list[i].length;

	rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
			       grh->sgid_index, &sgid, &sgid_attr);
	if (rc) {
		DP_ERR(dev,
		       "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
		       ah_attr->port_num, grh->sgid_index);
		return rc;
	}

	vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
	if (vlan_id < VLAN_CFI_MASK)
		has_vlan = true;
	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);

	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
		       ah_attr->grh.sgid_index);
		return -ENOENT;
	}

	has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
	if (!has_udp) {
		/* RoCE v1 */
		ether_type = ETH_P_ROCE;
		*roce_mode = ROCE_V1;
	} else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
		/* RoCE v2 IPv4 */
		ip_ver = 4;
		ether_type = ETH_P_IP;
		has_grh_ipv6 = false;
		*roce_mode = ROCE_V2_IPV4;
	} else {
		/* RoCE v2 IPv6 */
		ip_ver = 6;
		ether_type = ETH_P_IPV6;
		*roce_mode = ROCE_V2_IPV6;
	}

	rc = ib_ud_header_init(send_size, false, true, has_vlan,
			       has_grh_ipv6, ip_ver, has_udp, 0, udh);
	if (rc) {
		DP_ERR(dev, "gsi post send: failed to init header\n");
		return rc;
	}

	/* ENET + VLAN headers */
	ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
	ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
	if (has_vlan) {
		udh->eth.type = htons(ETH_P_8021Q);
		udh->vlan.tag = htons(vlan_id);
		udh->vlan.type = htons(ether_type);
	} else {
		udh->eth.type = htons(ether_type);
	}

	/* BTH */
	udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
	udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
	udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
	udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
	udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

	/* DETH */
	udh->deth.qkey = htonl(0x80010000);
	udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

	if (has_grh_ipv6) {
		/* GRH / IPv6 header */
		udh->grh.traffic_class = grh->traffic_class;
		udh->grh.flow_label = grh->flow_label;
		udh->grh.hop_limit = grh->hop_limit;
		udh->grh.destination_gid = grh->dgid;
		memcpy(&udh->grh.source_gid.raw, &sgid.raw,
		       sizeof(udh->grh.source_gid.raw));
	} else {
		/* IPv4 header */
		u32 ipv4_addr;

		udh->ip4.protocol = IPPROTO_UDP;
		udh->ip4.tos = htonl(ah_attr->grh.flow_label);
		udh->ip4.frag_off = htons(IP_DF);
		udh->ip4.ttl = ah_attr->grh.hop_limit;

		ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
		udh->ip4.saddr = ipv4_addr;
		ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
		udh->ip4.daddr = ipv4_addr;
		/* note: checksum is calculated by the device */
	}

	/* UDP */
	if (has_udp) {
		udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
		udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
		udh->udp.csum = 0;
		/* UDP length is untouched hence is zero */
	}
	return 0;
}

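A note on the 100-byte cap above: qedr_gsi_build_header stacks an Ethernet header, an optional 802.1Q tag, either a GRH/IPv6 or an IPv4 header, an optional UDP header (RoCE v2 only), and the IB BTH and DETH into one contiguous buffer, so QEDR_MAX_UD_HEADER_SIZE must cover the worst case. A back-of-the-envelope sketch using the standard wire sizes (this enum is illustrative, not part of the driver):

/* Hypothetical size check; the values are the standard Ethernet/IB
 * on-the-wire header sizes, not identifiers from this patch.
 */
enum {
	ETH_HDR_SZ  = 14,	/* dmac + smac + ethertype */
	VLAN_TAG_SZ = 4,	/* 802.1Q tag */
	GRH_HDR_SZ  = 40,	/* IB GRH, same size as an IPv6 header */
	UDP_HDR_SZ  = 8,	/* RoCE v2 encapsulation */
	BTH_HDR_SZ  = 12,	/* base transport header */
	DETH_HDR_SZ = 8,	/* datagram extended transport header */
};

/* 14 + 4 + 40 + 8 + 12 + 8 = 86 bytes worst case, under the 100-byte cap */
static const int max_gsi_ud_header = ETH_HDR_SZ + VLAN_TAG_SZ + GRH_HDR_SZ +
				     UDP_HDR_SZ + BTH_HDR_SZ + DETH_HDR_SZ;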
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
					struct qedr_qp *qp,
					struct ib_send_wr *swr,
					struct qed_roce_ll2_packet **p_packet)
{
	u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
	struct qed_roce_ll2_packet *packet;
	struct pci_dev *pdev = dev->pdev;
	int roce_mode, header_size;
	struct ib_ud_header udh;
	int i, rc;

	*p_packet = NULL;

	rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
	if (rc)
		return rc;

	header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

	packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
	if (!packet)
		return -ENOMEM;

	packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
						  &packet->header.baddr,
						  GFP_ATOMIC);
	if (!packet->header.vaddr) {
		kfree(packet);
		return -ENOMEM;
	}

	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
	else
		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;

	packet->roce_mode = roce_mode;
	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
	packet->header.len = header_size;
	packet->n_seg = swr->num_sge;
	for (i = 0; i < packet->n_seg; i++) {
		packet->payload[i].baddr = swr->sg_list[i].addr;
		packet->payload[i].len = swr->sg_list[i].length;
	}

	*p_packet = packet;

	return 0;
}

int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr)
{
	struct qed_roce_ll2_packet *pkt = NULL;
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_tx_params params;
	struct qedr_dev *dev = qp->dev;
	unsigned long flags;
	int rc;

	if (qp->state != QED_ROCE_QP_STATE_RTS) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post send: failed to post WR. QP state is %d and not QED_ROCE_QP_STATE_RTS\n",
		       qp->state);
		return -EINVAL;
	}

	if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
		DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
		       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
		rc = -EINVAL;
		goto err;
	}

	if (wr->opcode != IB_WR_SEND) {
		DP_ERR(dev,
		       "gsi post send: failed due to unsupported opcode %d\n",
		       wr->opcode);
		rc = -EINVAL;
		goto err;
	}

	memset(&params, 0, sizeof(params));

	spin_lock_irqsave(&qp->q_lock, flags);

	rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
	if (rc) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		goto err;
	}

	rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
	if (!rc) {
		qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
		qedr_inc_sw_prod(&qp->sq);
		DP_DEBUG(qp->dev, QEDR_MSG_GSI,
			 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
			 wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
	} else {
		if (rc == QED_ROCE_TX_HEAD_FAILURE) {
			/* TX failed while posting header - release resources */
			dma_free_coherent(&dev->pdev->dev, pkt->header.len,
					  pkt->header.vaddr, pkt->header.baddr);
			kfree(pkt);
		} else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
			/* NTD since TX failed while posting a fragment. We will
			 * release the resources on TX callback
			 */
		}

		DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
		rc = -EAGAIN;
		*bad_wr = wr;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (wr->next) {
		DP_ERR(dev,
		       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
		*bad_wr = wr->next;
		rc = -EINVAL;
	}

	return rc;

err:
	*bad_wr = wr;
	return rc;
}

int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	struct qedr_dev *dev = get_qedr_dev(ibqp->device);
	struct qedr_qp *qp = get_qedr_qp(ibqp);
	struct qed_roce_ll2_buffer buf;
	unsigned long flags;
	int status = 0;
	int rc;

	if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
	    (qp->state != QED_ROCE_QP_STATE_RTS)) {
		*bad_wr = wr;
		DP_ERR(dev,
		       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
		       qp->state);
		return -EINVAL;
	}

	memset(&buf, 0, sizeof(buf));

	spin_lock_irqsave(&qp->q_lock, flags);

	while (wr) {
		if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
			       wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
			goto err;
		}

		buf.baddr = wr->sg_list[0].addr;
		buf.len = wr->sg_list[0].length;

		rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
		if (rc) {
			DP_ERR(dev,
			       "gsi post recv: failed to post rx buffer (rc=%d)\n",
			       rc);
			goto err;
		}

		memset(&qp->rqe_wr_id[qp->rq.prod], 0,
		       sizeof(qp->rqe_wr_id[qp->rq.prod]));
		qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
		qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

		qedr_inc_sw_prod(&qp->rq);

		wr = wr->next;
	}

	spin_unlock_irqrestore(&qp->q_lock, flags);

	return status;
err:
	spin_unlock_irqrestore(&qp->q_lock, flags);
	*bad_wr = wr;
	return -ENOMEM;
}

int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
	struct qedr_cq *cq = get_qedr_cq(ibcq);
	struct qedr_qp *qp = dev->gsi_qp;
	unsigned long flags;
	int i = 0;

	spin_lock_irqsave(&cq->cq_lock, flags);

	while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
		wc[i].opcode = IB_WC_RECV;
		wc[i].pkey_index = 0;
		wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
			       IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
		/* 0 - currently only one recv sg is supported */
		wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
		wc[i].wc_flags |= IB_WC_WITH_SMAC;
		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
			wc[i].wc_flags |= IB_WC_WITH_VLAN;
			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
		}

		qedr_inc_sw_cons(&qp->rq);
		i++;
	}

	while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
		memset(&wc[i], 0, sizeof(*wc));

		wc[i].qp = &qp->ibqp;
		wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
		wc[i].opcode = IB_WC_SEND;
		wc[i].status = IB_WC_SUCCESS;

		qedr_inc_sw_cons(&qp->sq);
		i++;
	}

	spin_unlock_irqrestore(&cq->cq_lock, flags);

	DP_DEBUG(dev, QEDR_MSG_GSI,
		 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
		 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
		 qp->sq.gsi_cons, qp->ibqp.qp_num);

	return i;
}
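The send and receive queues above are plain software rings: qedr_gsi_post_send and qedr_gsi_post_recv advance a producer index, the LL2 tx/rx completion callbacks advance gsi_cons, and qedr_gsi_poll_cq drains entries until cons catches up with gsi_cons. A minimal sketch of the index discipline this implies, assuming wrap-around at max_wr (the real increment helpers live elsewhere in this series; the struct and function below are illustrative only):

/* Sketch only: assumes the indices wrap at max_wr, as implied by the
 * fixed-size rqe_wr_id/wqe_wr_id arrays allocated in qedr_create_gsi_qp.
 */
struct gsi_ring_sketch {
	u16 prod;	/* next free slot, advanced on post */
	u16 cons;	/* next slot to report, advanced on poll */
	u16 gsi_cons;	/* advanced by the LL2 tx/rx completion callbacks */
	u16 max_wr;	/* ring size */
};

static void gsi_ring_inc(u16 *idx, u16 max_wr)
{
	*idx = (*idx + 1) % max_wr;	/* simple modular wrap */
}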
61
drivers/infiniband/hw/qedr/qedr_cm.h
Normal file
@ -0,0 +1,61 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef LINUX_QEDR_CM_H_
#define LINUX_QEDR_CM_H_

#define QEDR_GSI_MAX_RECV_WR	(4096)
#define QEDR_GSI_MAX_SEND_WR	(4096)

#define QEDR_GSI_MAX_RECV_SGE	(1)	/* LL2 FW limitation */

#define ETH_P_ROCE		(0x8915)
#define QEDR_ROCE_V2_UDP_SPORT	(0000)

static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
{
	return *(u32 *)(void *)&gid[12];
}

/* RDMA CM */
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr);
int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		       struct ib_send_wr **bad_wr);
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
				 struct ib_qp_init_attr *attrs,
				 struct qedr_qp *qp);
void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
			  struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
int qedr_destroy_gsi_qp(struct qedr_dev *dev);
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
#endif
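qedr_get_ipv4_from_gid above relies on the RoCE v2 IPv4 convention that the 16-byte GID carries an IPv4-mapped IPv6 address, so the IPv4 address occupies the last four bytes (offsets 12-15) in network byte order. A small illustration (the example address is arbitrary, not from this patch):

/* ::ffff:192.0.2.1 laid out as a 16-byte GID */
static const u8 example_v4mapped_gid[16] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0xff, 0xff,	/* v4-mapped prefix */
	192, 0, 2, 1,		/* embedded IPv4 address, bytes 12..15 */
};
/* qedr_get_ipv4_from_gid() returns those four bytes as a big-endian u32,
 * ready to be stored directly into the IPv4 header's saddr/daddr above.
 */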
56
drivers/infiniband/hw/qedr/qedr_hsi.h
Normal file
@ -0,0 +1,56 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QED_HSI_ROCE__
#define __QED_HSI_ROCE__

#include <linux/qed/common_hsi.h>
#include <linux/qed/roce_common.h>
#include "qedr_hsi_rdma.h"

/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
	ROCE_ASYNC_EVENT_NONE = 0,
	ROCE_ASYNC_EVENT_COMM_EST = 1,
	ROCE_ASYNC_EVENT_SQ_DRAINED,
	ROCE_ASYNC_EVENT_SRQ_LIMIT,
	ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
	ROCE_ASYNC_EVENT_CQ_ERR,
	ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
	ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
	ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
	ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
	ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
	ROCE_ASYNC_EVENT_SRQ_EMPTY,
	MAX_ROCE_ASYNC_EVENTS_TYPE
};

#endif /* __QED_HSI_ROCE__ */
748
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
Normal file
@ -0,0 +1,748 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QED_HSI_RDMA__
#define __QED_HSI_RDMA__

#include <linux/qed/rdma_common.h>

/* rdma completion notification queue element */
struct rdma_cnqe {
	struct regpair cq_handle;
};

struct rdma_cqe_responder {
	struct regpair srq_wr_id;
	struct regpair qp_handle;
	__le32 imm_data_or_inv_r_Key;
	__le32 length;
	__le32 imm_data_hi;
	__le16 rq_cons;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK	0x1
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT	0
#define RDMA_CQE_RESPONDER_TYPE_MASK		0x3
#define RDMA_CQE_RESPONDER_TYPE_SHIFT		1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK		0x1
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT	3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK		0x1
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT	4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK	0x1
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT	5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK	0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT	6
	u8 status;
};

struct rdma_cqe_requester {
	__le16 sq_cons;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK	0x1
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT	0
#define RDMA_CQE_REQUESTER_TYPE_MASK		0x3
#define RDMA_CQE_REQUESTER_TYPE_SHIFT		1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK	0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT	3
	u8 status;
};

struct rdma_cqe_common {
	struct regpair reserved0;
	struct regpair qp_handle;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK		0x1
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT	0
#define RDMA_CQE_COMMON_TYPE_MASK		0x3
#define RDMA_CQE_COMMON_TYPE_SHIFT		1
#define RDMA_CQE_COMMON_RESERVED2_MASK		0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT		3
	u8 status;
};

/* rdma completion queue element */
union rdma_cqe {
	struct rdma_cqe_responder resp;
	struct rdma_cqe_requester req;
	struct rdma_cqe_common cmn;
};

/* CQE requester status enumeration */
enum rdma_cqe_requester_status_enum {
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};

/* CQE responder status enumeration */
enum rdma_cqe_responder_status_enum {
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};

/* CQE type enumeration */
enum rdma_cqe_type {
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};

struct rdma_sq_sge {
	__le32 length;
	struct regpair addr;
	__le32 l_key;
};

struct rdma_rq_sge {
	struct regpair addr;
	__le32 length;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK		0x3FFFFFF
#define RDMA_RQ_SGE_L_KEY_SHIFT		0
#define RDMA_RQ_SGE_NUM_SGES_MASK	0x7
#define RDMA_RQ_SGE_NUM_SGES_SHIFT	26
#define RDMA_RQ_SGE_RESERVED0_MASK	0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT	29
};

struct rdma_srq_sge {
	struct regpair addr;
	__le32 length;
	__le32 l_key;
};

/* Rdma doorbell data for SQ and RQ */
struct rdma_pwm_val16_data {
	__le16 icid;
	__le16 value;
};

union rdma_pwm_val16_data_union {
	struct rdma_pwm_val16_data as_struct;
	__le32 as_dword;
};

/* Rdma doorbell data for CQ */
struct rdma_pwm_val32_data {
	__le16 icid;
	u8 agg_flags;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK	0x3
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT	0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK	0x1
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT	2
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK	0x1F
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT	3
	__le32 value;
};

/* DIF Block size options */
enum rdma_dif_block_size {
	RDMA_DIF_BLOCK_512 = 0,
	RDMA_DIF_BLOCK_4096 = 1,
	MAX_RDMA_DIF_BLOCK_SIZE
};

/* DIF CRC initial value */
enum rdma_dif_crc_seed {
	RDMA_DIF_CRC_SEED_0000 = 0,
	RDMA_DIF_CRC_SEED_FFFF = 1,
	MAX_RDMA_DIF_CRC_SEED
};

/* RDMA DIF Error Result Structure */
struct rdma_dif_error_result {
	__le32 error_intervals;
	__le32 dif_error_1st_interval;
	u8 flags;
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK		0x1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT		0
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK	0x1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT	1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK	0x1
#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT	2
#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK			0xF
#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT			3
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK			0x1
#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT			7
	u8 reserved1[55];
};

/* DIF IO direction */
enum rdma_dif_io_direction_flg {
	RDMA_DIF_DIR_RX = 0,
	RDMA_DIF_DIR_TX = 1,
	MAX_RDMA_DIF_IO_DIRECTION_FLG
};

/* RDMA DIF Runt Result Structure */
struct rdma_dif_runt_result {
	__le16 guard_tag;
	__le16 reserved[3];
};

/* Memory window type enumeration */
enum rdma_mw_type {
	RDMA_MW_TYPE_1,
	RDMA_MW_TYPE_2A,
	MAX_RDMA_MW_TYPE
};

struct rdma_sq_atomic_wqe {
	__le32 reserved1;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT	0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK	0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
	struct regpair remote_va;
	__le32 r_key;
	__le32 reserved2;
	struct regpair cmp_data;
	struct regpair swap_data;
};

/* First element (16 bytes) of atomic wqe */
struct rdma_sq_atomic_wqe_1st {
	__le32 reserved1;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK		0x1
#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT		0
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK		0x1
#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT		3
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK		0x1
#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT		4
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK		0x7
#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT		5
	u8 wqe_size;
	u8 prev_wqe_size;
};

/* Second element (16 bytes) of atomic wqe */
struct rdma_sq_atomic_wqe_2nd {
	struct regpair remote_va;
	__le32 r_key;
	__le32 reserved2;
};

/* Third element (16 bytes) of atomic wqe */
struct rdma_sq_atomic_wqe_3rd {
	struct regpair cmp_data;
	struct regpair swap_data;
};

struct rdma_sq_bind_wqe {
	struct regpair addr;
	__le32 l_key;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK		0x1
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT		0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK		0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT	5
	u8 wqe_size;
	u8 prev_wqe_size;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK	0x1
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT	0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK		0x1
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT		1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK		0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT	2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK	0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT	0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK	0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT	1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK	0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT	2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK	0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT	3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK	0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT	4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK		0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT	5
	u8 reserved3;
	u8 length_hi;
	__le32 length_lo;
	__le32 parent_l_key;
	__le32 reserved4;
};

/* First element (16 bytes) of bind wqe */
struct rdma_sq_bind_wqe_1st {
	struct regpair addr;
	__le32 l_key;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT	0
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT	3
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK	0x1
#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT	4
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK	0x7
#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT	5
	u8 wqe_size;
	u8 prev_wqe_size;
};

/* Second element (16 bytes) of bind wqe */
struct rdma_sq_bind_wqe_2nd {
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT	0
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT	1
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK	0x3F
#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT	2
	u8 access_ctrl;
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT	0
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT	1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT	3
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK	0x1
#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT	4
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK	0x7
#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT	5
	u8 reserved3;
	u8 length_hi;
	__le32 length_lo;
	__le32 parent_l_key;
	__le32 reserved4;
};

/* Structure with only the SQ WQE common
 * fields. Size is of one SQ element (16B)
 */
struct rdma_sq_common_wqe {
	__le32 reserved1[3];
	u8 req_type;
	u8 flags;
#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK	0x1
#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT	0
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK	0x7
#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT	5
	u8 wqe_size;
	u8 prev_wqe_size;
};

struct rdma_sq_fmr_wqe {
	struct regpair addr;
	__le32 l_key;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK		0x1
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT		0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK		0x1
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT	5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK		0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT		6
	u8 wqe_size;
	u8 prev_wqe_size;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK	0x1F
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT	0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK		0x1
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT	5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK		0x1
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT		6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK		0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT		7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK	0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT	0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK	0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT	1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK	0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT	2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK		0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT	3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK	0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT	4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK		0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT		5
	u8 reserved3;
	u8 length_hi;
	__le32 length_lo;
	struct regpair pbl_addr;
	__le32 dif_base_ref_tag;
	__le16 dif_app_tag;
	__le16 dif_app_tag_mask;
	__le16 dif_runt_crc_value;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK		0x1
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT		1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK		0x1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK		0x1
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT		6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK			0x1FF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT			7
	__le32 Reserved5;
};

/* First element (16 bytes) of fmr wqe */
struct rdma_sq_fmr_wqe_1st {
	struct regpair addr;
	__le32 l_key;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT	0
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK		0x1
#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT	3
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT	4
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK	0x3
#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
};

/* Second element (16 bytes) of fmr wqe */
struct rdma_sq_fmr_wqe_2nd {
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK	0x1F
#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT	0
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT	5
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT	6
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT	7
	u8 access_ctrl;
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT	0
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT	1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT	2
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT	3
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK	0x1
#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT	4
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK	0x7
#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT	5
	u8 reserved3;
	u8 length_hi;
	__le32 length_lo;
	struct regpair pbl_addr;
};

/* Third element (16 bytes) of fmr wqe */
struct rdma_sq_fmr_wqe_3rd {
	__le32 dif_base_ref_tag;
	__le16 dif_app_tag;
	__le16 dif_app_tag_mask;
	__le16 dif_runt_crc_value;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT	0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK		0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT	1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT	4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT	5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK		0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT		6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK		0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT		7
	__le32 Reserved5;
};

struct rdma_sq_local_inv_wqe {
	struct regpair reserved;
	__le32 inv_l_key;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK		0x1
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT		0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK		0x1
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK		0x1
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT		4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT	5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK		0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT		6
	u8 wqe_size;
	u8 prev_wqe_size;
};

struct rdma_sq_rdma_wqe {
	__le32 imm_data;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK		0x1
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT		0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT	5
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK		0x3
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
	struct regpair remote_va;
	__le32 r_key;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK		0x1
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT		0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT	1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT	2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK			0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT		3
	u8 reserved2[3];
};

/* First element (16 bytes) of rdma wqe */
struct rdma_sq_rdma_wqe_1st {
	__le32 imm_data;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT	0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT	3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT	4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK	0x3
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
};

/* Second element (16 bytes) of rdma wqe */
struct rdma_sq_rdma_wqe_2nd {
	struct regpair remote_va;
	__le32 r_key;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK	0x1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT	0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT	2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK		0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT		3
	u8 reserved2[3];
};

/* SQ WQE req type enumeration */
enum rdma_sq_req_type {
	RDMA_SQ_REQ_TYPE_SEND,
	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
	RDMA_SQ_REQ_TYPE_RDMA_WR,
	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
	RDMA_SQ_REQ_TYPE_RDMA_RD,
	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
	RDMA_SQ_REQ_TYPE_FAST_MR,
	RDMA_SQ_REQ_TYPE_BIND,
	RDMA_SQ_REQ_TYPE_INVALID,
	MAX_RDMA_SQ_REQ_TYPE
};

struct rdma_sq_send_wqe {
	__le32 inv_key_or_imm_data;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK		0x1
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT		0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK		0x1
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT		3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT	5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK		0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
	__le32 reserved1[4];
};

struct rdma_sq_send_wqe_1st {
	__le32 inv_key_or_imm_data;
	__le32 length;
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT	0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT	3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK	0x1
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT	4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK	0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT	5
	u8 wqe_size;
	u8 prev_wqe_size;
};

struct rdma_sq_send_wqe_2st {
	__le32 reserved1[4];
};

#endif /* __QED_HSI_RDMA__ */
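Throughout this header, multi-bit fields packed into a u8 or __le16 are described by MASK/SHIFT pairs instead of C bitfields, which keeps the wire layout independent of compiler bitfield ordering. A generic sketch of how such pairs are consumed (these macros are illustrative; the driver uses the equivalent helpers from the qed common HSI headers):

/* Hypothetical helpers showing the MASK/SHIFT idiom only. */
#define FIELD_GET_SKETCH(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define FIELD_SET_SKETCH(value, name, val) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((val) & name##_MASK) << name##_SHIFT))

/* e.g. reading the 2-bit CQE type out of rdma_cqe_common.flags:
 *	type = FIELD_GET_SKETCH(cqe->cmn.flags, RDMA_CQE_COMMON_TYPE);
 */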
3547
drivers/infiniband/hw/qedr/verbs.c
Normal file
File diff suppressed because it is too large
101
drivers/infiniband/hw/qedr/verbs.h
Normal file
@ -0,0 +1,101 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QEDR_VERBS_H__
#define __QEDR_VERBS_H__

int qedr_query_device(struct ib_device *ibdev,
		      struct ib_device_attr *attr, struct ib_udata *udata);
int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
int qedr_modify_port(struct ib_device *, u8 port, int mask,
		     struct ib_port_modify *props);

int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);

int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);

struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
int qedr_dealloc_ucontext(struct ib_ucontext *);

int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
int qedr_del_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, void **context);
int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context);
struct ib_pd *qedr_alloc_pd(struct ib_device *,
			    struct ib_ucontext *, struct ib_udata *);
int qedr_dealloc_pd(struct ib_pd *pd);

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx,
			     struct ib_udata *udata);
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
			     struct ib_udata *);
int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_udata *udata);
int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);

struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
int qedr_destroy_ah(struct ib_ah *ibah);

int qedr_dereg_mr(struct ib_mr *);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);

struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *);

int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset);

struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg);
int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
		   struct ib_send_wr **bad_wr);
int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
		   struct ib_recv_wr **bad_wr);
int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
		     u8 port_num, const struct ib_wc *in_wc,
		     const struct ib_grh *in_grh,
		     const struct ib_mad_hdr *in_mad,
		     size_t in_mad_size, struct ib_mad_hdr *out_mad,
		     size_t *out_mad_size, u16 *out_mad_pkey_index);

int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
			struct ib_port_immutable *immutable);
#endif
@ -107,15 +107,4 @@ config QEDE
	---help---
	  This enables the support for ...

config INFINIBAND_QEDR
	tristate "QLogic qede RoCE sources [debug]"
	depends on QEDE && 64BIT
	select QED_LL2
	default n
	---help---
	  This provides a temporary node that allows the compilation
	  and logical testing of the InfiniBand over Ethernet support
	  for QLogic QED. This would be replaced by the 'real' option
	  once the QEDR driver is added [+relocated].

endif # NET_VENDOR_QLOGIC
@ -612,6 +612,8 @@
 */
#define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
#define  PCI_EXP_DEVCAP2_ARI		0x00000020 /* Alternative Routing-ID */
#define  PCI_EXP_DEVCAP2_ATOMIC_ROUTE	0x00000040 /* Atomic Op routing */
#define  PCI_EXP_DEVCAP2_ATOMIC_COMP64	0x00000100 /* Atomic 64-bit compare */
#define  PCI_EXP_DEVCAP2_LTR		0x00000800 /* Latency tolerance reporting */
#define  PCI_EXP_DEVCAP2_OBFF_MASK	0x000c0000 /* OBFF support mechanism */
#define  PCI_EXP_DEVCAP2_OBFF_MSG	0x00040000 /* New message signaling */
@ -619,6 +621,7 @@
#define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
#define  PCI_EXP_DEVCTL2_COMP_TIMEOUT	0x000f	/* Completion Timeout Value */
#define  PCI_EXP_DEVCTL2_ARI		0x0020	/* Alternative Routing-ID */
#define  PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040	/* Set Atomic requests */
#define  PCI_EXP_DEVCTL2_IDO_REQ_EN	0x0100	/* Allow IDO for requests */
#define  PCI_EXP_DEVCTL2_IDO_CMP_EN	0x0200	/* Allow IDO for completions */
#define  PCI_EXP_DEVCTL2_LTR_EN		0x0400	/* Enable LTR mechanism */
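The PCI_EXP_DEVCAP2_ATOMIC_* capability bits and the PCI_EXP_DEVCTL2_ATOMIC_REQ control bit added above are what let a driver advertise and enable PCIe AtomicOp requests, which RDMA atomic verbs ultimately depend on. A minimal sketch of the enable step, assuming the generic pcie_capability_* accessors rather than any code from this series:

/* Sketch: turn on AtomicOp requester on an endpoint. A real driver
 * must also walk the hierarchy and check ATOMIC_ROUTE / completer
 * support before relying on atomics; that part is elided here.
 */
#include <linux/pci.h>

static int sketch_enable_atomic_req(struct pci_dev *pdev)
{
	return pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					PCI_EXP_DEVCTL2_ATOMIC_REQ);
}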
106
include/uapi/rdma/qedr-abi.h
Normal file
@ -0,0 +1,106 @@
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __QEDR_USER_H__
#define __QEDR_USER_H__

#include <linux/types.h>

#define QEDR_ABI_VERSION		(8)

/* user kernel communication data structures. */

struct qedr_alloc_ucontext_resp {
	__u64 db_pa;
	__u32 db_size;

	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_srq_wr;
	__u32 sges_per_send_wr;
	__u32 sges_per_recv_wr;
	__u32 sges_per_srq_wr;
	__u32 max_cqes;
};

struct qedr_alloc_pd_ureq {
	__u64 rsvd1;
};

struct qedr_alloc_pd_uresp {
	__u32 pd_id;
};

struct qedr_create_cq_ureq {
	__u64 addr;
	__u64 len;
};

struct qedr_create_cq_uresp {
	__u32 db_offset;
	__u16 icid;
};

struct qedr_create_qp_ureq {
	__u32 qp_handle_hi;
	__u32 qp_handle_lo;

	/* SQ */
	/* user space virtual address of SQ buffer */
	__u64 sq_addr;

	/* length of SQ buffer */
	__u64 sq_len;

	/* RQ */
	/* user space virtual address of RQ buffer */
	__u64 rq_addr;

	/* length of RQ buffer */
	__u64 rq_len;
};

struct qedr_create_qp_uresp {
	__u32 qp_id;
	__u32 atomic_supported;

	/* SQ */
	__u32 sq_db_offset;
	__u16 sq_icid;

	/* RQ */
	__u32 rq_db_offset;
	__u16 rq_icid;

	__u32 rq_db2_offset;
};

#endif /* __QEDR_USER_H__ */
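The ABI above is what the matching userspace provider sees: qedr_alloc_ucontext_resp hands back the doorbell region's bus address and size plus the device limits, with all fields in fixed-width __u32/__u64 types so the layout is identical across 32- and 64-bit userspace. A hedged sketch of how a hypothetical userspace consumer might map the doorbell region (the function and the offset convention are illustrative assumptions, not code from the actual provider library):

/* Hypothetical userspace-side sketch; cmd_fd is the uverbs command fd
 * and using db_pa as the mmap offset is an assumption here — the
 * kernel-side qedr_mmap is what validates the requested offset.
 */
#include <stdint.h>
#include <sys/mman.h>

static void *map_doorbells(int cmd_fd, uint64_t db_pa, uint32_t db_size)
{
	return mmap(NULL, db_size, PROT_WRITE, MAP_SHARED, cmd_fd, db_pa);
}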