IB: Add CQ comp_vector support

Add a num_comp_vectors member to struct ib_device and extend
ib_create_cq() to pass in a comp_vector parameter -- this parallels
the userspace libibverbs API.  Update all hardware drivers to set
num_comp_vectors to 1 and have all ULPs pass 0 for the comp_vector
value.  Pass the value of num_comp_vectors to userspace rather than
hard-coding a value of 1.
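
For illustration only (this snippet is not part of the patch): once a
driver advertises more than one vector in num_comp_vectors, a consumer
could pick a vector per CQ through the new ib_create_cq() signature.
The helper below, including its per-CPU round-robin policy, is a
hypothetical sketch.

	#include <rdma/ib_verbs.h>

	/*
	 * Hypothetical helper: spread CQs across the completion vectors
	 * a device reports.  With this patch every driver reports 1, so
	 * the modulo always yields 0, matching the 0 that all in-tree
	 * ULPs pass today.
	 */
	static struct ib_cq *create_cq_on_cpu(struct ib_device *device,
					      ib_comp_handler handler,
					      void *cq_context, int cqe,
					      int cpu)
	{
		int comp_vector = cpu % device->num_comp_vectors;

		return ib_create_cq(device, handler, NULL, cq_context,
				    cqe, comp_vector);
	}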

We want multiple CQ event vector support (via MSI-X or similar for
adapters that can generate multiple interrupts), but it's not clear
how many vectors we want, or how we want to deal with policy issues
such as how to decide which vector to use or how to set up interrupt
affinity.  This patch is useful for experimenting, since no core
changes will be necessary when updating a driver to support multiple
vectors, and we know that we want to make at least these changes
anyway.

Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Authored by Michael S. Tsirkin on 2007-05-03 13:48:47 +03:00; committed by Roland Dreier
parent 154257f362
commit f4fd0b224d
18 changed files with 28 additions and 16 deletions


@@ -2771,7 +2771,7 @@ static int ib_mad_port_open(struct ib_device *device,
         cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
         port_priv->cq = ib_create_cq(port_priv->device,
                                      ib_mad_thread_completion_handler,
-                                     NULL, port_priv, cq_size);
+                                     NULL, port_priv, cq_size, 0);
         if (IS_ERR(port_priv->cq)) {
                 printk(KERN_ERR PFX "Couldn't create ib_mad CQ\n");
                 ret = PTR_ERR(port_priv->cq);


@@ -802,6 +802,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         INIT_LIST_HEAD(&obj->async_list);
         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
+                                             cmd.comp_vector,
                                              file->ucontext, &udata);
         if (IS_ERR(cq)) {
                 ret = PTR_ERR(cq);


@@ -752,7 +752,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
         spin_unlock(&map_lock);
         uverbs_dev->ib_dev = device;
-        uverbs_dev->num_comp_vectors = 1;
+        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
         uverbs_dev->dev = cdev_alloc();
         if (!uverbs_dev->dev)


@@ -609,11 +609,11 @@ EXPORT_SYMBOL(ib_destroy_qp);
 struct ib_cq *ib_create_cq(struct ib_device *device,
                            ib_comp_handler comp_handler,
                            void (*event_handler)(struct ib_event *, void *),
-                           void *cq_context, int cqe)
+                           void *cq_context, int cqe, int comp_vector)
 {
         struct ib_cq *cq;
-        cq = device->create_cq(device, cqe, NULL, NULL);
+        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
         if (!IS_ERR(cq)) {
                 cq->device = device;


@@ -290,7 +290,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
         return 0;
 }
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vector,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata)
 {
@@ -795,6 +795,7 @@ int c2_register_device(struct c2_dev *dev)
         memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
         memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
         dev->ibdev.phys_port_cnt = 1;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &dev->pcidev->dev;
         dev->ibdev.query_device = c2_query_device;
         dev->ibdev.query_port = c2_query_port;


@@ -139,7 +139,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
         return 0;
 }
-static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
+static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
                                     struct ib_ucontext *ib_context,
                                     struct ib_udata *udata)
 {
@@ -1110,6 +1110,7 @@ int iwch_register_device(struct iwch_dev *dev)
         dev->ibdev.node_type = RDMA_NODE_RNIC;
         memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
         dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
+        dev->ibdev.num_comp_vectors = 1;
         dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
         dev->ibdev.query_device = iwch_query_device;
         dev->ibdev.query_port = iwch_query_port;


@@ -113,7 +113,7 @@ struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
         return ret;
 }
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
 {


@@ -123,7 +123,7 @@ int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
 void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
+struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
                              struct ib_ucontext *context,
                              struct ib_udata *udata);


@@ -313,6 +313,7 @@ int ehca_init_device(struct ehca_shca *shca)
         shca->ib_device.node_type = RDMA_NODE_IB_CA;
         shca->ib_device.phys_port_cnt = shca->num_ports;
+        shca->ib_device.num_comp_vectors = 1;
         shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev;
         shca->ib_device.query_device = ehca_query_device;
         shca->ib_device.query_port = ehca_query_port;
@@ -375,7 +376,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
                 return -EPERM;
         }
-        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10);
+        ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10, 0);
         if (IS_ERR(ibcq)) {
                 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
                 return PTR_ERR(ibcq);


@@ -204,7 +204,7 @@ static void send_complete(unsigned long data)
  *
  * Called by ib_create_cq() in the generic verbs code.
  */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                               struct ib_ucontext *context,
                               struct ib_udata *udata)
 {


@@ -1561,6 +1561,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
                 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
         dev->node_type = RDMA_NODE_IB_CA;
         dev->phys_port_cnt = 1;
+        dev->num_comp_vectors = 1;
         dev->dma_device = &dd->pcidev->dev;
         dev->query_device = ipath_query_device;
         dev->modify_device = ipath_modify_device;


@@ -735,7 +735,7 @@ int ipath_destroy_srq(struct ib_srq *ibsrq);
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
+struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vector,
                               struct ib_ucontext *context,
                               struct ib_udata *udata);


@@ -663,6 +663,7 @@ static int mthca_destroy_qp(struct ib_qp *qp)
 }
 static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+                                     int comp_vector,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
 {
@@ -1292,6 +1293,7 @@ int mthca_register_device(struct mthca_dev *dev)
                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
         dev->ib_dev.node_type = RDMA_NODE_IB_CA;
         dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
+        dev->ib_dev.num_comp_vectors = 1;
         dev->ib_dev.dma_device = &dev->pdev->dev;
         dev->ib_dev.query_device = mthca_query_device;
         dev->ib_dev.query_port = mthca_query_port;


@@ -793,7 +793,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
         }
         p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
-                             ipoib_sendq_size + 1);
+                             ipoib_sendq_size + 1, 0);
         if (IS_ERR(p->cq)) {
                 ret = PTR_ERR(p->cq);
                 ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);


@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
         if (!ret)
                 size += ipoib_recvq_size;
-        priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size);
+        priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
         if (IS_ERR(priv->cq)) {
                 printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
                 goto out_free_mr;


@@ -76,7 +76,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
                                       iser_cq_callback,
                                       iser_cq_event_callback,
                                       (void *)device,
-                                      ISER_MAX_CQ_LEN);
+                                      ISER_MAX_CQ_LEN, 0);
         if (IS_ERR(device->cq))
                 goto cq_err;


@@ -197,7 +197,7 @@ static int srp_create_target_ib(struct srp_target_port *target)
                 return -ENOMEM;
         target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
-                                  NULL, target, SRP_CQ_SIZE);
+                                  NULL, target, SRP_CQ_SIZE, 0);
         if (IS_ERR(target->cq)) {
                 ret = PTR_ERR(target->cq);
                 goto out;


@@ -912,6 +912,8 @@ struct ib_device {
         u32                           flags;
+        int                           num_comp_vectors;
         struct iw_cm_verbs           *iwcm;
         int                         (*query_device)(struct ib_device *device,
@@ -978,6 +980,7 @@ struct ib_device {
                                                     struct ib_recv_wr *recv_wr,
                                                     struct ib_recv_wr **bad_recv_wr);
         struct ib_cq *              (*create_cq)(struct ib_device *device, int cqe,
+                                                 int comp_vector,
                                                  struct ib_ucontext *context,
                                                  struct ib_udata *udata);
         int                         (*destroy_cq)(struct ib_cq *cq);
@@ -1358,13 +1361,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
  * @cq_context: Context associated with the CQ returned to the user via
  *   the associated completion and event handlers.
  * @cqe: The minimum size of the CQ.
+ * @comp_vector - Completion vector used to signal completion events.
+ *     Must be >= 0 and < context->num_comp_vectors.
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
 struct ib_cq *ib_create_cq(struct ib_device *device,
                            ib_comp_handler comp_handler,
                            void (*event_handler)(struct ib_event *, void *),
-                           void *cq_context, int cqe);
+                           void *cq_context, int cqe, int comp_vector);
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.