Mirror of https://github.com/torvalds/linux.git
IB: Pass only ib_udata in function prototypes
Now that ib_udata is passed to all of the drivers' object create/destroy APIs, the ib_udata carries the ib_ucontext for every user command. There is no need to also pass the ib_ucontext via the function prototypes. Make ib_udata the only argument passed. Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent bdeacabd1a
commit ff23dfa134
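Every driver hunk below follows the same pattern: the ib_ucontext parameter is dropped, kernel-internal callers pass a NULL udata, and drivers that still need per-context state recover their ucontext wrapper from the ib_udata with rdma_udata_to_drv_context() (pulled in via the <rdma/uverbs_ioctl.h> includes this patch adds). A minimal sketch of the idea, using hypothetical type and helper names rather than the kernel's exact definitions:

/*
 * Illustrative sketch only -- not the kernel's implementation.
 * The ib_udata handed to a driver op is embedded in a bundle that
 * already carries the ib_ucontext, so container_of() can recover it;
 * a separate ib_ucontext argument is therefore redundant.
 */
#include <linux/kernel.h>   /* container_of() */
#include <rdma/ib_verbs.h>  /* struct ib_udata, struct ib_ucontext */

struct bundle_like {                    /* hypothetical stand-in */
	struct ib_udata driver_udata;
	struct ib_ucontext *context;
};

static struct ib_ucontext *udata_to_ucontext(struct ib_udata *udata)
{
	if (!udata)
		return NULL;            /* kernel-internal callers pass NULL */
	return container_of(udata, struct bundle_like, driver_udata)->context;
}

/*
 * A driver then wraps the generic ucontext in its private structure,
 * e.g. (the mydrv_* names are examples, not a real driver):
 *
 *	struct mydrv_ucontext *uctx = udata ?
 *		container_of(udata_to_ucontext(udata),
 *			     struct mydrv_ucontext, ibucontext) : NULL;
 */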
@@ -147,7 +147,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
struct ib_cq *cq;
int ret = -ENOMEM;

cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL);
cq = dev->ops.create_cq(dev, &cq_attr, NULL);
if (IS_ERR(cq))
return cq;

@@ -423,7 +423,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
atomic_set(&pd->usecnt, 0);
pd->res.type = RDMA_RESTRACK_PD;

ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
if (ret)
goto err_alloc;

@@ -594,8 +594,7 @@ static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs)
}

if (!xrcd) {
xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context,
&attrs->driver_udata);
xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
if (IS_ERR(xrcd)) {
ret = PTR_ERR(xrcd);
goto err;

@@ -1009,8 +1008,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
attr.comp_vector = cmd->comp_vector;
attr.flags = cmd->flags;

cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
&attrs->driver_udata);
cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_file;

@@ -111,8 +111,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
INIT_LIST_HEAD(&obj->comp_list);
INIT_LIST_HEAD(&obj->async_list);

cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context,
&attrs->driver_udata);
cq = ib_dev->ops.create_cq(ib_dev, &attr, &attrs->driver_udata);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
goto err_event_file;

@@ -269,7 +269,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_set_task(&pd->res, caller);

ret = device->ops.alloc_pd(pd, NULL, NULL);
ret = device->ops.alloc_pd(pd, NULL);
if (ret) {
kfree(pd);
return ERR_PTR(ret);

@@ -1911,7 +1911,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
{
struct ib_cq *cq;

cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
cq = device->ops.create_cq(device, cq_attr, NULL);

if (!IS_ERR(cq)) {
cq->device = device;

@@ -2142,7 +2142,7 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
if (!device->ops.alloc_xrcd)
return ERR_PTR(-EOPNOTSUPP);

xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
xrcd = device->ops.alloc_xrcd(device, NULL);
if (!IS_ERR(xrcd)) {
xrcd->device = device;
xrcd->inode = NULL;
@@ -576,14 +576,12 @@ void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
&pd->qplib_pd);
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
struct ib_udata *udata)
int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_re_ucontext *ucntx = container_of(ucontext,
struct bnxt_re_ucontext,
ib_uctx);
struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
int rc;

@@ -2589,7 +2587,6 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)

struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

@@ -2616,12 +2613,10 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;

if (context) {
if (udata) {
struct bnxt_re_cq_req req;
struct bnxt_re_ucontext *uctx = container_of
(context,
struct bnxt_re_ucontext,
ib_uctx);
struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct bnxt_re_ucontext, ib_uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
goto fail;

@@ -2672,7 +2667,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
atomic_inc(&rdev->cq_count);
spin_lock_init(&cq->cq_lock);

if (context) {
if (udata) {
struct bnxt_re_cq_resp resp;

resp.cqid = cq->qplib_cq.id;

@@ -2690,7 +2685,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
return &cq->ib_cq;

c2fail:
if (context)
if (udata)
ib_umem_release(cq->umem);
fail:
kfree(cq->cql);

@@ -163,8 +163,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
int index, union ib_gid *gid);
enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
u8 port_num);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr,

@@ -197,7 +196,6 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
@@ -106,7 +106,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -114,7 +113,6 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
struct iwch_cq *chp;
struct iwch_create_cq_resp uresp;
struct iwch_create_cq_req ureq;
struct iwch_ucontext *ucontext = NULL;
static int warned;
size_t resplen;

@@ -127,8 +125,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
if (!chp)
return ERR_PTR(-ENOMEM);

if (ib_context) {
ucontext = to_iwch_ucontext(ib_context);
if (udata) {
if (!t3a_device(rhp)) {
if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
kfree(chp);

@@ -154,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);

if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}

@@ -170,8 +167,10 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
return ERR_PTR(-ENOMEM);
}

if (ucontext) {
if (udata) {
struct iwch_mm_entry *mm;
struct iwch_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct iwch_ucontext, ibucontext);

mm = kmalloc(sizeof *mm, GFP_KERNEL);
if (!mm) {

@@ -378,8 +377,7 @@ static void iwch_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
}

static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata)
static int iwch_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct iwch_pd *php = to_iwch_pd(pd);
struct ib_device *ibdev = pd->device;

@@ -394,7 +392,7 @@ static int iwch_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,

php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (udata) {
struct iwch_alloc_pd_resp resp = {.pdid = php->pdid};

if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
@@ -994,7 +994,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)

struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -1003,10 +1002,11 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
struct c4iw_cq *chp;
struct c4iw_create_cq ucmd;
struct c4iw_create_cq_resp uresp;
struct c4iw_ucontext *ucontext = NULL;
int ret, wr_len;
size_t memsize, hwentries;
struct c4iw_mm_entry *mm, *mm2;
struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct c4iw_ucontext, ibucontext);

pr_debug("ib_dev %p entries %d\n", ibdev, entries);
if (attr->flags)

@@ -1017,8 +1017,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
if (vector >= rhp->rdev.lldi.nciq)
return ERR_PTR(-EINVAL);

if (ib_context) {
ucontext = to_c4iw_ucontext(ib_context);
if (udata) {
if (udata->inlen < sizeof(ucmd))
ucontext->is_32b_cqe = 1;
}

@@ -1070,7 +1069,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
/*
* memsize must be a multiple of the page size if its a user cq.
*/
if (ucontext)
if (udata)
memsize = roundup(memsize, PAGE_SIZE);

chp->cq.size = hwentries;

@@ -995,7 +995,6 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_context,
struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,

@@ -204,8 +204,7 @@ static void c4iw_deallocate_pd(struct ib_pd *pd, struct ib_udata *udata)
mutex_unlock(&rhp->rdev.stats.lock);
}

static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata)
static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct c4iw_pd *php = to_c4iw_pd(pd);
struct ib_device *ibdev = pd->device;

@@ -220,7 +219,7 @@ static int c4iw_allocate_pd(struct ib_pd *pd, struct ib_ucontext *context,

php->pdid = pdid;
php->rhp = rhp;
if (context) {
if (udata) {
struct c4iw_alloc_pd_resp uresp = {.pdid = php->pdid};

if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
@@ -302,7 +302,6 @@ static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

@@ -314,6 +313,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
int vector = attr->comp_vector;
int cq_entries = attr->cqe;
int ret;
struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
udata, struct hns_roce_ucontext, ibucontext);

if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",

@@ -332,7 +333,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->ib_cq.cqe = cq_entries - 1;
spin_lock_init(&hr_cq->lock);

if (context) {
if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "Failed to copy_from_udata.\n");
ret = -EFAULT;

@@ -350,8 +351,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,

if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp))) {
ret = hns_roce_db_map_user(to_hr_ucontext(context),
udata, ucmd.db_addr,
ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
&hr_cq->db);
if (ret) {
dev_err(dev, "cq record doorbell map failed!\n");

@@ -362,7 +362,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}

/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
uar = &context->uar;
} else {
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);

@@ -401,7 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
* problems if tptr is set to zero here, so we initialze it in user
* space.
*/
if (!context && hr_cq->tptr_addr)
if (!udata && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0;

/* Get created cq handler and carry out event */

@@ -409,7 +409,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->event = hns_roce_ib_cq_event;
hr_cq->cq_depth = cq_entries;

if (context) {
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)

@@ -422,21 +422,20 @@ err_cqc:
hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
if (udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
(udata->outlen >= sizeof(resp)))
hns_roce_db_unmap_user(to_hr_ucontext(context),
&hr_cq->db);
hns_roce_db_unmap_user(context, &hr_cq->db);

err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
if (udata)
ib_umem_release(hr_cq->umem);
else
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);

err_db:
if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
if (!udata && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
hns_roce_free_db(hr_dev, &hr_cq->db);

err_cq:

@@ -1112,8 +1112,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *pd,
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags, struct ib_udata *udata);

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata);
int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);

@@ -1177,7 +1176,6 @@ int to_hr_qp_type(int qp_type);

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);

@@ -730,7 +730,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
/* Reserved cq for loop qp */
cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
cq_init_attr.comp_vector = 0;
cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL);
if (IS_ERR(cq)) {
dev_err(dev, "Create cq for reserved loop qp failed!");
return -ENOMEM;

@@ -749,7 +749,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto alloc_mem_failed;

pd->device = ibdev;
ret = hns_roce_alloc_pd(pd, NULL, NULL);
ret = hns_roce_alloc_pd(pd, NULL);
if (ret)
goto alloc_pd_failed;

@@ -57,8 +57,7 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
}

int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

@@ -72,7 +71,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return ret;
}

if (context) {
if (udata) {
struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};

if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
@@ -291,18 +291,15 @@ static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_
/**
* i40iw_alloc_pd - allocate protection domain
* @pd: PD pointer
* @context: user context created during alloc
* @udata: user data
*/
static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata)
static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct i40iw_pd *iwpd = to_iwpd(pd);
struct i40iw_device *iwdev = to_iwdev(pd->device);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_alloc_pd_resp uresp;
struct i40iw_sc_pd *sc_pd;
struct i40iw_ucontext *ucontext;
u32 pd_id = 0;
int err;

@@ -318,8 +315,9 @@ static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,

sc_pd = &iwpd->sc_pd;

if (context) {
ucontext = to_ucontext(context);
if (udata) {
struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct i40iw_ucontext, ibucontext);
dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
memset(&uresp, 0, sizeof(uresp));
uresp.pd_id = pd_id;

@@ -1091,12 +1089,10 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
* i40iw_create_cq - create cq
* @ibdev: device pointer from stack
* @attr: attributes for cq
* @context: user context created during alloc
* @udata: user data
*/
static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);

@@ -1146,14 +1142,14 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
info.ceq_id_valid = true;
info.ceqe_mask = 1;
info.type = I40IW_CQ_TYPE_IWARP;
if (context) {
struct i40iw_ucontext *ucontext;
if (udata) {
struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct i40iw_ucontext, ibucontext);
struct i40iw_create_cq_req req;
struct i40iw_cq_mr *cqmr;

memset(&req, 0, sizeof(req));
iwcq->user_mode = true;
ucontext = to_ucontext(context);
if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
err_code = -EFAULT;
goto cq_free_resources;

@@ -1223,7 +1219,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
goto cq_free_resources;
}

if (context) {
if (udata) {
struct i40iw_create_cq_resp resp;

memset(&resp, 0, sizeof(resp));
@@ -174,7 +174,6 @@ err_buf:

#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -184,6 +183,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
struct mlx4_uar *uar;
void *buf_addr;
int err;
struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx4_ib_ucontext, ibucontext);

if (entries < 1 || entries > dev->dev->caps.max_cqes)
return ERR_PTR(-EINVAL);

@@ -205,7 +206,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->send_qp_list);
INIT_LIST_HEAD(&cq->recv_qp_list);

if (context) {
if (udata) {
struct mlx4_ib_create_cq ucmd;

if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {

@@ -219,12 +220,11 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_cq;

err = mlx4_ib_db_map_user(to_mucontext(context), udata,
ucmd.db_addr, &cq->db);
err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db);
if (err)
goto err_mtt;

uar = &to_mucontext(context)->uar;
uar = &context->uar;
cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
} else {
err = mlx4_db_alloc(dev->dev, &cq->db, 1);

@@ -249,21 +249,21 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (dev->eq_table)
vector = dev->eq_table[vector % ibdev->num_comp_vectors];

err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma,
&cq->mcq, vector, 0,
!!(cq->create_flags &
IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
buf_addr, !!context);
buf_addr, !!udata);
if (err)
goto err_dbmap;

if (context)
if (udata)
cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
else
cq->mcq.comp = mlx4_ib_cq_comp;
cq->mcq.event = mlx4_ib_cq_event;

if (context)
if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
err = -EFAULT;
goto err_cq_free;

@@ -275,19 +275,19 @@ err_cq_free:
mlx4_cq_free(dev->dev, &cq->mcq);

err_dbmap:
if (context)
mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);
if (udata)
mlx4_ib_db_unmap_user(context, &cq->db);

err_mtt:
mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

if (context)
if (udata)
ib_umem_release(cq->umem);
else
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
if (!context)
if (!udata)
mlx4_db_free(dev->dev, &cq->db);

err_cq:

@@ -31,6 +31,7 @@
*/

#include <linux/slab.h>
#include <rdma/uverbs_ioctl.h>

#include "mlx4_ib.h"

@@ -41,12 +42,13 @@ struct mlx4_ib_user_db_page {
int refcnt;
};

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
struct ib_udata *udata, unsigned long virt,
int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db)
{
struct mlx4_ib_user_db_page *page;
int err = 0;
struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx4_ib_ucontext, ibucontext);

mutex_lock(&context->db_page_mutex);

@@ -1177,8 +1177,7 @@ static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}
}

static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx4_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;

@@ -1188,7 +1187,7 @@ static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
if (err)
return err;

if (context && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
return -EFAULT;
}

@@ -1201,7 +1200,6 @@ static void mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
}

static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct mlx4_ib_xrcd *xrcd;

@@ -723,8 +723,7 @@ static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context,
struct ib_udata *udata, unsigned long virt,
int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

@@ -746,7 +745,6 @@ int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);

@@ -1041,11 +1041,11 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err_mtt;

if (qp_has_rq(init_attr)) {
err = mlx4_ib_db_map_user(
context, udata,
(src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
err = mlx4_ib_db_map_user(udata,
(src == MLX4_IB_QP_SRC) ?
ucmd.qp.db_addr :
ucmd.wq.db_addr,
&qp->db);
&qp->db);
if (err)
goto err_mtt;
}

@@ -131,8 +131,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
if (err)
goto err_mtt;

err = mlx4_ib_db_map_user(ucontext, udata, ucmd.db_addr,
&srq->db);
err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
if (err)
goto err_mtt;
} else {
@@ -679,8 +679,7 @@ static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
struct ib_ucontext *context, struct mlx5_ib_cq *cq,
int entries, u32 **cqb,
struct mlx5_ib_cq *cq, int entries, u32 **cqb,
int *cqe_size, int *index, int *inlen)
{
struct mlx5_ib_create_cq ucmd = {};

@@ -691,6 +690,8 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
int ncont;
void *cqc;
int err;
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);

ucmdlen = udata->inlen < sizeof(ucmd) ?
(sizeof(ucmd) - sizeof(ucmd.flags)) : sizeof(ucmd);

@@ -715,8 +716,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
return err;
}

err = mlx5_ib_db_map_user(to_mucontext(context), udata, ucmd.db_addr,
&cq->db);
err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
if (err)
goto err_umem;

@@ -740,7 +740,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
MLX5_SET(cqc, cqc, log_page_size,
page_shift - MLX5_ADAPTER_PAGE_SHIFT);

*index = to_mucontext(context)->bfregi.sys_pages[0];
*index = context->bfregi.sys_pages[0];

if (ucmd.cqe_comp_en == 1) {
int mini_cqe_format;

@@ -782,14 +782,14 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}

MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
return 0;

err_cqb:
kvfree(*cqb);

err_db:
mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
ib_umem_release(cq->buf.umem);

@@ -886,7 +886,6 @@ static void notify_soft_wc_handler(struct work_struct *work)

struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -927,8 +926,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->list_recv_qp);

if (udata) {
err = create_cq_user(dev, udata, context, cq, entries,
&cqb, &cqe_size, &index, &inlen);
err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
&index, &inlen);
if (err)
goto err_create;
} else {

@@ -965,7 +964,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,

mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
cq->mcq.irqn = irqn;
if (context)
if (udata)
cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
else
cq->mcq.comp = mlx5_ib_cq_comp;

@@ -973,7 +972,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,

INIT_LIST_HEAD(&cq->wc_list);

if (context)
if (udata)
if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
err = -EFAULT;
goto err_cmd;

@@ -2341,8 +2341,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
return 0;
}

static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct mlx5_ib_pd *pd = to_mpd(ibpd);
struct ib_device *ibdev = ibpd->device;

@@ -2351,8 +2350,10 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
u16 uid = 0;
struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);

uid = context ? to_mucontext(context)->devx_uid : 0;
uid = context ? context->devx_uid : 0;
MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
MLX5_SET(alloc_pd_in, in, uid, uid);
err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),

@@ -2362,7 +2363,7 @@ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,

pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
pd->uid = uid;
if (context) {
if (udata) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);

@@ -4749,11 +4750,11 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->p0->uobject = NULL;
atomic_set(&devr->p0->usecnt, 0);

ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
ret = mlx5_ib_alloc_pd(devr->p0, NULL);
if (ret)
goto error0;

devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
if (IS_ERR(devr->c0)) {
ret = PTR_ERR(devr->c0);
goto error1;

@@ -4765,7 +4766,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
devr->c0->cq_context = NULL;
atomic_set(&devr->c0->usecnt, 0);

devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x0)) {
ret = PTR_ERR(devr->x0);
goto error2;

@@ -4776,7 +4777,7 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
mutex_init(&devr->x0->tgt_qp_mutex);
INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x1)) {
ret = PTR_ERR(devr->x1);
goto error3;

@@ -1083,7 +1083,6 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
void *buffer, int buflen, size_t *bc);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);

@@ -1123,8 +1122,7 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_mad_hdr *out, size_t *out_mad_size,
u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);

@@ -5632,8 +5632,7 @@ out:
}

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_xrcd *xrcd;
@@ -363,18 +363,17 @@ static int mthca_mmap_uar(struct ib_ucontext *context,
return 0;
}

static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct mthca_pd *pd = to_mpd(ibpd);
int err;

err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
if (err)
return err;

if (context) {
if (udata) {
if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
mthca_pd_free(to_mdev(ibdev), pd);
return -EFAULT;

@@ -634,7 +633,6 @@ static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -642,6 +640,8 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
struct mthca_cq *cq;
int nent;
int err;
struct mthca_ucontext *context = rdma_udata_to_drv_context(
udata, struct mthca_ucontext, ibucontext);

if (attr->flags)
return ERR_PTR(-EINVAL);

@@ -649,19 +649,19 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
return ERR_PTR(-EINVAL);

if (context) {
if (udata) {
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return ERR_PTR(-EFAULT);

err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
ucmd.set_db_index, ucmd.set_db_page);
err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
context->db_tab, ucmd.set_db_index,
ucmd.set_db_page);
if (err)
return ERR_PTR(err);

err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab,
ucmd.arm_db_index, ucmd.arm_db_page);
err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
context->db_tab, ucmd.arm_db_index,
ucmd.arm_db_page);
if (err)
goto err_unmap_set;
}

@@ -672,7 +672,7 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
goto err_unmap_arm;
}

if (context) {
if (udata) {
cq->buf.mr.ibmr.lkey = ucmd.lkey;
cq->set_ci_db_index = ucmd.set_db_index;
cq->arm_db_index = ucmd.arm_db_index;

@@ -681,14 +681,13 @@ static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
for (nent = 1; nent <= entries; nent <<= 1)
; /* nothing */

err = mthca_init_cq(to_mdev(ibdev), nent,
context ? to_mucontext(context) : NULL,
context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
err = mthca_init_cq(to_mdev(ibdev), nent, context,
udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
cq);
if (err)
goto err_free;

if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
mthca_free_cq(to_mdev(ibdev), cq);
err = -EFAULT;
goto err_free;

@@ -702,14 +701,14 @@ err_free:
kfree(cq);

err_unmap_arm:
if (context)
mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab, ucmd.arm_db_index);
if (udata)
mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
context->db_tab, ucmd.arm_db_index);

err_unmap_set:
if (context)
mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
to_mucontext(context)->db_tab, ucmd.set_db_index);
if (udata)
mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
context->db_tab, ucmd.set_db_index);

return ERR_PTR(err);
}
@@ -640,22 +640,24 @@ static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
/**
* nes_alloc_pd
*/
static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata)
static int nes_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
struct ib_device *ibdev = pd->device;
struct nes_pd *nespd = to_nespd(pd);
struct nes_vnic *nesvnic = to_nesvnic(ibdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_ucontext *nesucontext;
struct nes_alloc_pd_resp uresp;
u32 pd_num = 0;
int err;
struct nes_ucontext *nesucontext = rdma_udata_to_drv_context(
udata, struct nes_ucontext, ibucontext);

nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
netdev_refcnt_read(nesvnic->netdev));
nes_debug(
NES_DBG_PD,
"nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev,
&nesucontext->ibucontext, netdev_refcnt_read(nesvnic->netdev));

err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
nesadapter->max_pd, &pd_num, &nesadapter->next_pd, NES_RESOURCE_PD);

@@ -667,8 +669,7 @@ static int nes_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,

nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;

if (context) {
nesucontext = to_nesucontext(context);
if (udata) {
nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",

@@ -1375,7 +1376,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
*/
static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;

@@ -1420,9 +1420,10 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq->hw_cq.cq_number = cq_num;
nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;

if (udata) {
struct nes_ucontext *nes_ucontext = rdma_udata_to_drv_context(
udata, struct nes_ucontext, ibucontext);

if (context) {
nes_ucontext = to_nesucontext(context);
if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
kfree(nescq);

@@ -1489,7 +1490,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
cqp_request = nes_get_cqp_request(nesdev);
if (cqp_request == NULL) {
nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
if (!context)
if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {

@@ -1518,7 +1519,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
if (nesadapter->free_4kpbl == 0) {
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
nes_free_cqp_request(nesdev, cqp_request);
if (!context)
if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {

@@ -1540,7 +1541,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
if (nesadapter->free_256pbl == 0) {
spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
nes_free_cqp_request(nesdev, cqp_request);
if (!context)
if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {

@@ -1566,7 +1567,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
(nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));

if (context) {
if (udata) {
if (pbl_entries != 1)
u64temp = (u64)nespbl->pbl_pbase;
else

@@ -1597,7 +1598,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
nescq->hw_cq.cq_number, ret);
if ((!ret) || (cqp_request->major_code)) {
nes_put_cqp_request(nesdev, cqp_request);
if (!context)
if (!udata)
pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
nescq->hw_cq.cq_pbase);
else {

@@ -1611,7 +1612,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
}
nes_put_cqp_request(nesdev, cqp_request);

if (context) {
if (udata) {
/* free the nespbl */
pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
nespbl->pbl_pbase);
@@ -47,6 +47,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"

@@ -367,6 +368,16 @@ static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
return status;
}

/*
* NOTE:
*
* ocrdma_ucontext must be used here because this function is also
* called from ocrdma_alloc_ucontext where ib_udata does not have
* valid ib_ucontext pointer. ib_uverbs_get_context does not call
* uobj_{alloc|get_xxx} helpers which are used to store the
* ib_ucontext in uverbs_attr_bundle wrapping the ib_udata. so
* ib_udata does NOT imply valid ib_ucontext here!
*/
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ocrdma_ucontext *uctx,
struct ib_udata *udata)

@@ -593,7 +604,6 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
int status;

@@ -601,7 +611,8 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
u64 dpp_page_addr = 0;
u32 db_page_size;
struct ocrdma_alloc_pd_uresp rsp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct ocrdma_ucontext, ibucontext);

memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;

@@ -639,18 +650,17 @@ dpp_map_err:
return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
struct ocrdma_ucontext *uctx = NULL;
int status;
u8 is_uctx_pd = false;
struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct ocrdma_ucontext, ibucontext);

if (udata && context) {
uctx = get_ocrdma_ucontext(context);
if (udata) {
pd = ocrdma_get_ucontext_pd(uctx);
if (pd) {
is_uctx_pd = true;

@@ -664,8 +674,8 @@ int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
goto exit;

pd_mapping:
if (udata && context) {
status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
if (udata) {
status = ocrdma_copy_pd_uresp(dev, pd, udata);
if (status)
goto err;
}

@@ -946,13 +956,17 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
struct ib_udata *udata,
struct ib_ucontext *ib_ctx)
struct ib_udata *udata)
{
int status;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct ocrdma_ucontext, ibucontext);
struct ocrdma_create_cq_uresp uresp;

/* this must be user flow! */
if (!udata)
return -EINVAL;

memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
uresp.page_size = PAGE_ALIGN(cq->len);

@@ -983,13 +997,13 @@ err:

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
int entries = attr->cqe;
struct ocrdma_cq *cq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_ucontext *uctx = NULL;
struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
udata, struct ocrdma_ucontext, ibucontext);
u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;

@@ -1011,18 +1025,16 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);

if (ib_ctx) {
uctx = get_ocrdma_ucontext(ib_ctx);
if (udata)
pd_id = uctx->cntxt_pd->id;
}

status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
if (status) {
kfree(cq);
return ERR_PTR(status);
}
if (ib_ctx) {
status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
if (udata) {
status = ocrdma_copy_cq_uresp(dev, cq, udata);
if (status)
goto ctx_err;
}

@@ -69,13 +69,11 @@ void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);

int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);

int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
struct ib_udata *udata);
int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
@@ -42,6 +42,7 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/qed/common_hsi.h>
#include "qedr_hsi_rdma.h"

@@ -436,8 +437,7 @@ int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
vma->vm_page_prot);
}

int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct qedr_dev *dev = get_qedr_dev(ibdev);

@@ -446,7 +446,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
int rc;

DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
(udata && context) ? "User Lib" : "Kernel");
udata ? "User Lib" : "Kernel");

if (!dev->rdma_ctx) {
DP_ERR(dev, "invalid RDMA context\n");

@@ -459,10 +459,12 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,

pd->pd_id = pd_id;

if (udata && context) {
if (udata) {
struct qedr_alloc_pd_uresp uresp = {
.pd_id = pd_id,
};
struct qedr_ucontext *context = rdma_udata_to_drv_context(
udata, struct qedr_ucontext, ibucontext);

rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc) {

@@ -471,7 +473,7 @@ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
return rc;
}

pd->uctx = get_qedr_ucontext(context);
pd->uctx = context;
pd->uctx->pd = pd;
}

@@ -816,9 +818,10 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx, struct ib_udata *udata)
struct ib_udata *udata)
{
struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
udata, struct qedr_ucontext, ibucontext);
struct qed_rdma_destroy_cq_out_params destroy_oparams;
struct qed_rdma_destroy_cq_in_params destroy_iparams;
struct qedr_dev *dev = get_qedr_dev(ibdev);

@@ -906,7 +909,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
cq->sig = QEDR_CQ_MAGIC_NUMBER;
spin_lock_init(&cq->cq_lock);

if (ib_ctx) {
if (udata) {
rc = qedr_copy_cq_uresp(dev, cq, udata);
if (rc)
goto err3;

@@ -47,13 +47,11 @@ int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void qedr_dealloc_ucontext(struct ib_ucontext *uctx);

int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,
struct ib_udata *udata);
int qedr_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void qedr_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata);
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
@@ -447,8 +447,7 @@ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
return 0;
}

int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
void *umem_pd;

@@ -590,7 +589,6 @@ out_unlock:

struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct ib_cq *cq;

@@ -50,8 +50,7 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid);
int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
u16 *pkey);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata);
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
void usnic_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,

@@ -61,7 +60,6 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "pvrdma.h"

@ -93,7 +94,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
* pvrdma_create_cq - create completion queue
* @ibdev: the device
* @attr: completion queue attributes
* @context: user context
* @udata: user data
*
* @return: ib_cq completion queue pointer on success,
@ -101,7 +101,6 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
*/
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int entries = attr->cqe;
@ -116,6 +115,8 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd;
struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
udata, struct pvrdma_ucontext, ibucontext);

BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);

@ -133,7 +134,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}

cq->ibcq.cqe = entries;
cq->is_kernel = !context;
cq->is_kernel = !udata;

if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@ -185,8 +186,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
cmd->nchunks = npages;
cmd->ctx_handle = (context) ?
(u64)to_vucontext(context)->ctx_handle : 0;
cmd->ctx_handle = context ? context->ctx_handle : 0;
cmd->cqe = entries;
cmd->pdir_dma = cq->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
@ -204,7 +204,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);
cq->uar = &context->uar;

/* Copy udata back. */
if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {

@ -50,6 +50,7 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>
#include <rdma/uverbs_ioctl.h>

#include "pvrdma.h"

@ -419,13 +420,11 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
/**
* pvrdma_alloc_pd - allocate protection domain
* @ibpd: PD pointer
* @context: user context
* @udata: user data
*
* @return: the ib_pd protection domain pointer on success, otherwise errno.
*/
int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct pvrdma_pd *pd = to_vpd(ibpd);
@ -436,13 +435,15 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
struct pvrdma_ucontext *context = rdma_udata_to_drv_context(
udata, struct pvrdma_ucontext, ibucontext);

/* Check allowed max pds */
if (!atomic_add_unless(&dev->num_pds, 1, dev->dsr->caps.max_pd))
return -ENOMEM;

cmd->hdr.cmd = PVRDMA_CMD_CREATE_PD;
cmd->ctx_handle = (context) ? to_vucontext(context)->ctx_handle : 0;
cmd->ctx_handle = context ? context->ctx_handle : 0;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_PD_RESP);
if (ret < 0) {
dev_warn(&dev->pdev->dev,
@ -451,12 +452,12 @@ int pvrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
goto err;
}

pd->privileged = !context;
pd->privileged = !udata;
pd->pd_handle = resp->pd_handle;
pd->pdn = resp->pd_handle;
pd_resp.pdn = resp->pd_handle;

if (context) {
if (udata) {
if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");

@ -398,8 +398,7 @@ int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata);
int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
@ -412,7 +411,6 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);

|
||||
* rvt_create_cq - create a completion queue
|
||||
* @ibdev: the device this completion queue is attached to
|
||||
* @attr: creation attributes
|
||||
* @context: unused by the QLogic_IB driver
|
||||
* @udata: user data for libibverbs.so
|
||||
*
|
||||
* Called by ib_create_cq() in the generic verbs code.
|
||||
@ -178,7 +177,6 @@ static void send_complete(struct work_struct *work)
|
||||
*/
|
||||
struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
|
||||
const struct ib_cq_init_attr *attr,
|
||||
struct ib_ucontext *context,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
|
||||
@ -232,7 +230,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
|
||||
if (udata && udata->outlen >= sizeof(__u64)) {
|
||||
int err;
|
||||
|
||||
cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
|
||||
cq->ip = rvt_create_mmap_info(rdi, sz, udata, wc);
|
||||
if (!cq->ip) {
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
goto bail_wc;
|
||||
|
@ -53,7 +53,6 @@

struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

@ -49,6 +49,7 @@
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>
#include "mmap.h"

/**
@ -150,18 +151,19 @@ done:
* rvt_create_mmap_info - allocate information for hfi1_mmap
* @rdi: rvt dev struct
* @size: size in bytes to map
* @context: user context
* @udata: user data (must be valid!)
* @obj: opaque pointer to a cq, wq etc
*
* Return: rvt_mmap struct on success
*/
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
u32 size,
struct ib_ucontext *context,
void *obj)
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
struct ib_udata *udata, void *obj)
{
struct rvt_mmap_info *ip;

if (!udata)
return ERR_PTR(-EINVAL);

ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
if (!ip)
return ip;
@ -177,7 +179,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,

INIT_LIST_HEAD(&ip->pending_mmaps);
ip->size = size;
ip->context = context;
ip->context =
container_of(udata, struct uverbs_attr_bundle, driver_udata)
->context;
ip->obj = obj;
kref_init(&ip->ref);

@ -53,10 +53,8 @@
void rvt_mmap_init(struct rvt_dev_info *rdi);
void rvt_release_mmap_info(struct kref *ref);
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi,
u32 size,
struct ib_ucontext *context,
void *obj);
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
struct ib_udata *udata, void *obj);
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
u32 size, void *obj);

@ -51,15 +51,13 @@
/**
* rvt_alloc_pd - allocate a protection domain
* @ibpd: PD
* @context: optional user context
* @udata: optional user data
*
* Allocate and keep track of a PD.
*
* Return: 0 on success
*/
int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
int rvt_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct ib_device *ibdev = ibpd->device;
struct rvt_dev_info *dev = ib_to_rvt(ibdev);

@ -50,8 +50,7 @@

#include <rdma/rdma_vt.h>

int rvt_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata);
int rvt_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
void rvt_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);

#endif /* DEF_RDMAVTPD_H */

@ -957,8 +957,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
size_t sg_list_sz;
struct ib_qp *ret = ERR_PTR(-ENOMEM);
struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct rvt_ucontext, ibucontext);
void *priv = NULL;
size_t sqsize;

@ -1131,8 +1129,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
} else {
u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

qp->ip = rvt_create_mmap_info(rdi, s,
&ucontext->ibucontext,
qp->ip = rvt_create_mmap_info(rdi, s, udata,
qp->r_rq.wq);
if (!qp->ip) {
ret = ERR_PTR(-ENOMEM);

@ -78,8 +78,6 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
struct ib_udata *udata)
{
struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
struct rvt_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct rvt_ucontext, ibucontext);
struct rvt_srq *srq;
u32 sz;
struct ib_srq *ret;
@ -121,9 +119,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
int err;
u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

srq->ip =
rvt_create_mmap_info(dev, s, &ucontext->ibucontext,
srq->rq.wq);
srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;

@ -82,7 +82,7 @@ static void rxe_send_complete(unsigned long data)
}

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_ucontext *context,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp)
{
int err;
@ -94,7 +94,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
return -ENOMEM;
}

err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
vfree(cq->queue->buf);
@ -115,13 +115,13 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
}

int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
struct rxe_resize_cq_resp __user *uresp)
struct rxe_resize_cq_resp __user *uresp,
struct ib_udata *udata)
{
int err;

err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
sizeof(struct rxe_cqe),
cq->queue->ip ? cq->queue->ip->context : NULL,
sizeof(struct rxe_cqe), udata,
uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
if (!err)
cq->ibcq.cqe = cqe;

@ -53,11 +53,12 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_ucontext *context,
int comp_vector, struct ib_udata *udata,
struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
struct rxe_resize_cq_resp __user *uresp);
struct rxe_resize_cq_resp __user *uresp,
struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

@ -91,10 +92,8 @@ struct rxe_mmap_info {

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
u32 size,
struct ib_ucontext *context,
void *obj);
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

@ -224,13 +223,12 @@ int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init,
struct ib_ucontext *context,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd);
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
@ -140,13 +141,14 @@ done:
/*
* Allocate information for rxe_mmap
*/
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
u32 size,
struct ib_ucontext *context,
void *obj)
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
struct ib_udata *udata, void *obj)
{
struct rxe_mmap_info *ip;

if (!udata)
return ERR_PTR(-EINVAL);

ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
return NULL;
@ -165,7 +167,9 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,

INIT_LIST_HEAD(&ip->pending_mmaps);
ip->info.size = size;
ip->context = context;
ip->context =
container_of(udata, struct uverbs_attr_bundle, driver_udata)
->context;
ip->obj = obj;
kref_init(&ip->ref);

@ -217,8 +217,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
struct ib_ucontext *context,
struct ib_qp_init_attr *init, struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
@ -254,7 +253,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (!qp->sq.queue)
return -ENOMEM;

err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);

@ -287,7 +286,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
struct ib_ucontext *context,
struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
@ -308,7 +307,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (!qp->rq.queue)
return -ENOMEM;

err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
@ -344,8 +343,6 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct rxe_cq *rcq = to_rcq(init->recv_cq);
struct rxe_cq *scq = to_rcq(init->send_cq);
struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
struct rxe_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);

rxe_add_ref(pd);
rxe_add_ref(rcq);
@ -360,11 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,

rxe_qp_init_misc(rxe, qp, init);

err = rxe_qp_init_req(rxe, qp, init, &ucontext->ibuc, uresp);
err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
if (err)
goto err1;

err = rxe_qp_init_resp(rxe, qp, init, &ucontext->ibuc, uresp);
err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
if (err)
goto err2;

@ -36,18 +36,15 @@
#include "rxe_loc.h"
#include "rxe_queue.h"

int do_mmap_info(struct rxe_dev *rxe,
struct mminfo __user *outbuf,
struct ib_ucontext *context,
struct rxe_queue_buf *buf,
size_t buf_size,
struct rxe_mmap_info **ip_p)
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
struct ib_udata *udata, struct rxe_queue_buf *buf,
size_t buf_size, struct rxe_mmap_info **ip_p)
{
int err;
struct rxe_mmap_info *ip = NULL;

if (outbuf) {
ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
if (!ip)
goto err1;

@ -153,12 +150,9 @@ static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
return 0;
}

int rxe_queue_resize(struct rxe_queue *q,
unsigned int *num_elem_p,
unsigned int elem_size,
struct ib_ucontext *context,
struct mminfo __user *outbuf,
spinlock_t *producer_lock,
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
unsigned int elem_size, struct ib_udata *udata,
struct mminfo __user *outbuf, spinlock_t *producer_lock,
spinlock_t *consumer_lock)
{
struct rxe_queue *new_q;
@ -170,7 +164,7 @@ int rxe_queue_resize(struct rxe_queue *q,
if (!new_q)
return -ENOMEM;

err = do_mmap_info(new_q->rxe, outbuf, context, new_q->buf,
err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
new_q->buf_size, &new_q->ip);
if (err) {
vfree(new_q->buf);

@ -76,12 +76,9 @@ struct rxe_queue {
unsigned int index_mask;
};

int do_mmap_info(struct rxe_dev *rxe,
struct mminfo __user *outbuf,
struct ib_ucontext *context,
struct rxe_queue_buf *buf,
size_t buf_size,
struct rxe_mmap_info **ip_p);
int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
struct ib_udata *udata, struct rxe_queue_buf *buf,
size_t buf_size, struct rxe_mmap_info **ip_p);

void rxe_queue_reset(struct rxe_queue *q);

@ -89,10 +86,8 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
int *num_elem,
unsigned int elem_size);

int rxe_queue_resize(struct rxe_queue *q,
unsigned int *num_elem_p,
unsigned int elem_size,
struct ib_ucontext *context,
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
unsigned int elem_size, struct ib_udata *udata,
struct mminfo __user *outbuf,
/* Protect producers while resizing queue */
spinlock_t *producer_lock,

@ -99,8 +99,7 @@ err1:
}

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init,
struct ib_ucontext *context,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp)
{
int err;
@ -128,7 +127,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,

srq->rq.queue = q;

err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
q->buf_size, &q->ip);
if (err) {
vfree(q->buf);
@ -149,7 +148,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd)
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
int err;
struct rxe_queue *q = srq->rq.queue;
@ -163,11 +162,8 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
mi = u64_to_user_ptr(ucmd->mmap_info_addr);

err = rxe_queue_resize(q, &attr->max_wr,
rcv_wqe_size(srq->rq.max_sge),
srq->rq.queue->ip ?
srq->rq.queue->ip->context :
NULL,
mi, &srq->rq.producer_lock,
rcv_wqe_size(srq->rq.max_sge), udata, mi,
&srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
goto err2;

@ -176,8 +176,7 @@ static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
struct ib_udata *udata)
static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
@ -305,8 +304,6 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
int err;
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_ucontext *ucontext =
rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
struct rxe_srq *srq;
struct rxe_create_srq_resp __user *uresp = NULL;

@ -330,7 +327,7 @@ static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
rxe_add_ref(pd);
srq->pd = pd;

err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err)
goto err2;

@ -366,7 +363,7 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (err)
goto err1;

err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
if (err)
goto err1;

@ -799,7 +796,6 @@ err1:

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata)
{
int err;
@ -826,8 +822,8 @@ static struct ib_cq *rxe_create_cq(struct ib_device *dev,
goto err1;
}

err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
context, uresp);
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err)
goto err2;

@ -866,7 +862,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (err)
goto err1;

err = rxe_cq_resize_queue(cq, cqe, uresp);
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err)
goto err1;

@ -2394,8 +2394,7 @@ struct ib_device_ops {
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
struct ib_udata *udata);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr, u32 flags,
@ -2421,7 +2420,6 @@ struct ib_device_ops {
int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
struct ib_cq *(*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
@ -2456,7 +2454,6 @@ struct ib_device_ops {
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
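The struct ib_device_ops hunk above is the contract the driver changes in this patch implement: alloc_pd(), create_cq() and alloc_xrcd() now see only the object (or device) plus the ib_udata. A sketch of how a hypothetical driver would wire its callbacks to the trimmed prototypes (mydrv_* names are placeholders; drivers register the ops table with ib_set_device_ops()):

static int mydrv_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
static struct ib_cq *mydrv_create_cq(struct ib_device *device,
				     const struct ib_cq_init_attr *attr,
				     struct ib_udata *udata);

static const struct ib_device_ops mydrv_dev_ops = {
	.alloc_pd = mydrv_alloc_pd,
	.create_cq = mydrv_create_cq,
};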