RDMA: Handle ucontext allocations by IB/core
Following the PD conversion patch, do the same for ucontext allocations.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
afc1990e08
commit
a2a074ef39
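The conversion follows the same shape in every driver touched below: the driver's ucontext structure embeds struct ib_ucontext as its first member, the driver advertises the structure size through INIT_RDMA_OBJ_SIZE() in its ib_device_ops table, and the IB core performs the allocation and the final kfree(). The following before/after sketch of the callback shape is illustrative only, not code from the patch; the "foo" driver and its field names are hypothetical.

/* Illustrative sketch of the conversion pattern (hypothetical "foo" driver).
 * Before: the driver allocated its own wrapper and returned the embedded
 * ib_ucontext (or an ERR_PTR) to the core.
 */
static struct ib_ucontext *foo_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct foo_ucontext *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return &ctx->ibucontext;	/* driver owns the allocation */
}

/* After: the core allocates ops.size_ib_ucontext bytes (recorded via
 * INIT_RDMA_OBJ_SIZE) and passes in the embedded ib_ucontext; the driver
 * only initializes its private fields and returns 0 or -errno, and
 * dealloc_ucontext becomes void because the core does the kfree().
 */
static int foo_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct foo_ucontext *ctx = container_of(uctx, struct foo_ucontext,
						ibucontext);

	/* driver-private init only; the core kfree()s uctx on teardown */
	return 0;
}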
@@ -1832,6 +1832,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_DEVICE_OP(dev_ops, unmap_fmr);
 
 	SET_OBJ_SIZE(dev_ops, ib_pd);
+	SET_OBJ_SIZE(dev_ops, ib_ucontext);
 }
 EXPORT_SYMBOL(ib_set_device_ops);

@@ -844,7 +844,6 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 {
 	struct ib_ucontext *ucontext = ufile->ucontext;
 	struct ib_device *ib_dev = ucontext->device;
-	int ret;
 
 	/*
	 * If we are closing the FD then the user mmap VMAs must have
@@ -862,12 +861,8 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
 
 	rdma_restrack_del(&ucontext->res);
 
-	/*
-	 * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
-	 * the error return.
-	 */
-	ret = ib_dev->ops.dealloc_ucontext(ucontext);
-	WARN_ON(ret);
+	ib_dev->ops.dealloc_ucontext(ucontext);
+	kfree(ucontext);
 
 	ufile->ucontext = NULL;
 }

@@ -224,12 +224,13 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	if (ret)
 		goto err;
 
-	ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata);
-	if (IS_ERR(ucontext)) {
-		ret = PTR_ERR(ucontext);
+	ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
+	if (!ucontext) {
+		ret = -ENOMEM;
 		goto err_alloc;
 	}
 
-	ucontext->res.type = RDMA_RESTRACK_CTX;
 	ucontext->device = ib_dev;
 	ucontext->cg_obj = cg_obj;
 	/* ufile is required when some objects are released */
@@ -240,10 +241,6 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 
 	mutex_init(&ucontext->per_mm_list_lock);
 	INIT_LIST_HEAD(&ucontext->per_mm_list);
-	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
-		ucontext->invalidate_range = NULL;
-
-	resp.num_comp_vectors = file->device->num_comp_vectors;
 
 	ret = get_unused_fd_flags(O_CLOEXEC);
 	if (ret < 0)
@@ -256,15 +253,22 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 		goto err_fd;
 	}
 
+	resp.num_comp_vectors = file->device->num_comp_vectors;
+
 	ret = uverbs_response(attrs, &resp, sizeof(resp));
 	if (ret)
 		goto err_file;
 
-	fd_install(resp.async_fd, filp);
+	ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
+	if (ret)
+		goto err_file;
+	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
+		ucontext->invalidate_range = NULL;
+
+	ucontext->res.type = RDMA_RESTRACK_CTX;
 	rdma_restrack_uadd(&ucontext->res);
 
+	fd_install(resp.async_fd, filp);
+
 	/*
	 * Make sure that ib_uverbs_get_ucontext() sees the pointer update
	 * only after all writes to setup the ucontext have completed
@@ -283,7 +287,7 @@ err_fd:
 	put_unused_fd(resp.async_fd);
 
 err_free:
-	ib_dev->ops.dealloc_ucontext(ucontext);
+	kfree(ucontext);
 
 err_alloc:
 	ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);

@@ -3671,13 +3671,14 @@ free_mr:
 	return ERR_PTR(rc);
 }
 
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
-					   struct ib_udata *udata)
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = ctx->device;
+	struct bnxt_re_ucontext *uctx =
+		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
 	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 	struct bnxt_re_uctx_resp resp;
-	struct bnxt_re_ucontext *uctx;
 	u32 chip_met_rev_num = 0;
 	int rc;
 
@@ -3687,13 +3688,9 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 	if (ibdev->uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
 		dev_dbg(rdev_to_dev(rdev), " is different from the device %d ",
 			BNXT_RE_ABI_VERSION);
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 	}
 
-	uctx = kzalloc(sizeof(*uctx), GFP_KERNEL);
-	if (!uctx)
-		return ERR_PTR(-ENOMEM);
-
 	uctx->rdev = rdev;
 
 	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
@@ -3727,23 +3724,21 @@ struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
 		goto cfail;
 	}
 
-	return &uctx->ib_uctx;
+	return 0;
 cfail:
 	free_page((unsigned long)uctx->shpg);
 	uctx->shpg = NULL;
 fail:
-	kfree(uctx);
-	return ERR_PTR(rc);
+	return rc;
 }
 
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 {
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
 
 	struct bnxt_re_dev *rdev = uctx->rdev;
-	int rc = 0;
 
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
@@ -3752,17 +3747,10 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &uctx->dpi);
-		if (rc)
-			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!");
-			/* Don't fail, continue*/
+		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
 		uctx->dpi.dbr = NULL;
 	}
 
-	kfree(uctx);
-	return 0;
 }
 
 /* Helper function to mmap the virtual memory from user app */

@@ -135,8 +135,8 @@ struct bnxt_re_mw {
 };
 
 struct bnxt_re_ucontext {
+	struct ib_ucontext	ib_uctx;
 	struct bnxt_re_dev	*rdev;
-	struct ib_ucontext	ib_uctx;
 	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
@@ -215,9 +215,8 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
-struct ib_ucontext *bnxt_re_alloc_ucontext(struct ib_device *ibdev,
-					   struct ib_udata *udata);
-int bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);

@@ -638,6 +638,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.reg_user_mr = bnxt_re_reg_user_mr,
 	.req_notify_cq = bnxt_re_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
 };
 
 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)

@@ -62,7 +62,7 @@
 #include <rdma/cxgb3-abi.h>
 #include "common.h"
 
-static int iwch_dealloc_ucontext(struct ib_ucontext *context)
+static void iwch_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct iwch_dev *rhp = to_iwch_dev(context->device);
 	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
@@ -72,24 +72,20 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
-	kfree(ucontext);
-	return 0;
 }
 
-static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
-					       struct ib_udata *udata)
+static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
+			       struct ib_udata *udata)
 {
-	struct iwch_ucontext *context;
+	struct ib_device *ibdev = ucontext->device;
+	struct iwch_ucontext *context = to_iwch_ucontext(ucontext);
 	struct iwch_dev *rhp = to_iwch_dev(ibdev);
 
 	pr_debug("%s ibdev %p\n", __func__, ibdev);
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
 	cxio_init_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
-	return &context->ibucontext;
+	return 0;
 }
 
 static int iwch_destroy_cq(struct ib_cq *ib_cq)
@@ -1342,6 +1338,7 @@ static const struct ib_device_ops iwch_dev_ops = {
 	.req_notify_cq = iwch_arm_cq,
 	.resize_cq = iwch_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, iwch_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, iwch_ucontext, ibucontext),
 };
 
 int iwch_register_device(struct iwch_dev *dev)

@@ -58,7 +58,7 @@ static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
 	struct c4iw_dev *rhp;
@@ -70,26 +70,19 @@ static void c4iw_dealloc_ucontext(struct ib_ucontext *context)
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
-	kfree(ucontext);
-	return 0;
 }
 
-static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
-					       struct ib_udata *udata)
+static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
+			       struct ib_udata *udata)
 {
-	struct c4iw_ucontext *context;
+	struct ib_device *ibdev = ucontext->device;
+	struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
 	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
 	struct c4iw_alloc_ucontext_resp uresp;
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
 
 	pr_debug("ibdev %p\n", ibdev);
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
@@ -101,7 +94,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
 		if (!mm) {
 			ret = -ENOMEM;
-			goto err_free;
+			goto err;
 		}
 
 		uresp.status_page_size = PAGE_SIZE;
@@ -121,13 +114,11 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 		mm->len = PAGE_SIZE;
 		insert_mmap(context, mm);
 	}
-	return &context->ibucontext;
+	return 0;
 err_mm:
 	kfree(mm);
-err_free:
-	kfree(context);
 err:
-	return ERR_PTR(ret);
+	return ret;
 }
 
 static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
@@ -555,6 +546,7 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.reg_user_mr = c4iw_reg_user_mr,
 	.req_notify_cq = c4iw_arm_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
 };
 
 void c4iw_register_device(struct work_struct *work)

@@ -335,23 +335,19 @@ static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
 	return 0;
 }
 
-static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
-						   struct ib_udata *udata)
+static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
+				   struct ib_udata *udata)
 {
 	int ret = 0;
-	struct hns_roce_ucontext *context;
+	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
 	struct hns_roce_ib_alloc_ucontext_resp resp = {};
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
 
 	if (!hr_dev->active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 
 	resp.qp_tab_size = hr_dev->caps.num_qps;
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
 	if (ret)
 		goto error_fail_uar_alloc;
@@ -365,25 +361,20 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
 	if (ret)
 		goto error_fail_copy_to_udata;
 
-	return &context->ibucontext;
+	return 0;
 
 error_fail_copy_to_udata:
 	hns_roce_uar_free(hr_dev, &context->uar);
 
 error_fail_uar_alloc:
-	kfree(context);
-
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 
 	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
-	kfree(context);
-
-	return 0;
 }
 
 static int hns_roce_mmap(struct ib_ucontext *context,
@@ -478,6 +469,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 	.query_port = hns_roce_query_port,
 	.reg_user_mr = hns_roce_reg_user_mr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops hns_roce_dev_mr_ops = {

@@ -121,78 +121,55 @@ static int i40iw_query_port(struct ib_device *ibdev,
 
 /**
  * i40iw_alloc_ucontext - Allocate the user context data structure
- * @ibdev: device pointer from stack
+ * @uctx: Uverbs context pointer from stack
  * @udata: user data
  *
  * This keeps track of all objects associated with a particular
  * user-mode client.
  */
-static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
+static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
+				struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct i40iw_device *iwdev = to_iwdev(ibdev);
 	struct i40iw_alloc_ucontext_req req;
-	struct i40iw_alloc_ucontext_resp uresp;
-	struct i40iw_ucontext *ucontext;
+	struct i40iw_alloc_ucontext_resp uresp = {};
+	struct i40iw_ucontext *ucontext = to_ucontext(uctx);
 
 	if (ib_copy_from_udata(&req, udata, sizeof(req)))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
 		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
-	memset(&uresp, 0, sizeof(uresp));
 	uresp.max_qps = iwdev->max_qp;
 	uresp.max_pds = iwdev->max_pd;
 	uresp.wq_size = iwdev->max_qp_wr * 2;
 	uresp.kernel_ver = req.userspace_ver;
 
-	ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
-	if (!ucontext)
-		return ERR_PTR(-ENOMEM);
-
 	ucontext->iwdev = iwdev;
 	ucontext->abi_ver = req.userspace_ver;
 
-	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
-		kfree(ucontext);
-		return ERR_PTR(-EFAULT);
-	}
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+		return -EFAULT;
 
 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
 
-	return &ucontext->ibucontext;
+	return 0;
 }
 
 /**
  * i40iw_dealloc_ucontext - deallocate the user context data structure
  * @context: user context created during alloc
  */
-static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct i40iw_ucontext *ucontext = to_ucontext(context);
-	unsigned long flags;
-
-	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->cq_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
-	spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
-	if (!list_empty(&ucontext->qp_reg_mem_list)) {
-		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-		return -EBUSY;
-	}
-	spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
-	kfree(ucontext);
-	return 0;
+	return;
 }
 
 /**
@@ -2740,6 +2717,7 @@ static const struct ib_device_ops i40iw_dev_ops = {
 	.reg_user_mr = i40iw_reg_user_mr,
 	.req_notify_cq = i40iw_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
 };
 
 /**

@@ -1076,17 +1076,18 @@ out:
 	return err;
 }
 
-static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
-						  struct ib_udata *udata)
+static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
+				  struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
-	struct mlx4_ib_ucontext *context;
+	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
 	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;
 
 	if (!dev->ib_active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 
 	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
 		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
@@ -1100,15 +1101,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 		resp.cqe_size = dev->dev->caps.cqe_size;
 	}
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
@@ -1123,21 +1118,17 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 
-	return &context->ibucontext;
+	return err;
 }
 
-static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
 
 	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
-	kfree(context);
-
-	return 0;
 }
 
 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
@@ -2570,6 +2561,7 @@ static const struct ib_device_ops mlx4_ib_dev_ops = {
 	.rereg_user_mr = mlx4_ib_rereg_user_mr,
 	.resize_cq = mlx4_ib_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {

@@ -1745,14 +1745,15 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
 	mlx5_ib_disable_lb(dev, true, false);
 }
 
-static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
-						  struct ib_udata *udata)
+static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
+				  struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
 	struct mlx5_ib_alloc_ucontext_resp resp = {};
 	struct mlx5_core_dev *mdev = dev->mdev;
-	struct mlx5_ib_ucontext *context;
+	struct mlx5_ib_ucontext *context = to_mucontext(uctx);
 	struct mlx5_bfreg_info *bfregi;
 	int ver;
 	int err;
@@ -1762,29 +1763,29 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	bool lib_uar_4k;
 
 	if (!dev->ib_active)
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 
 	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
 		ver = 0;
 	else if (udata->inlen >= min_req_v2)
 		ver = 2;
 	else
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	req.total_num_bfregs = ALIGN(req.total_num_bfregs,
 				     MLX5_NON_FP_BFREGS_PER_UAR);
 	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
@@ -1817,10 +1818,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 		/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
 	}
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
 	bfregi = &context->bfregi;
 
@@ -1955,7 +1952,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 			   1, &dev->roce[port].tx_port_affinity));
 	}
 
-	return &context->ibucontext;
+	return 0;
 
 out_mdev:
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -1973,12 +1970,10 @@ out_count:
 	kfree(bfregi->count);
 
 out_ctx:
-	kfree(context);
-
-	return ERR_PTR(err);
+	return err;
 }
 
-static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
@@ -1998,9 +1993,6 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	deallocate_uars(dev, context);
 	kfree(bfregi->sys_pages);
 	kfree(bfregi->count);
-	kfree(context);
-
-	return 0;
 }
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -5984,6 +5976,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
 	.rereg_user_mr = mlx5_ib_rereg_user_mr,
 	.resize_cq = mlx5_ib_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {

@@ -301,17 +301,16 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 	return err;
 }
 
-static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata)
+static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
+				struct ib_udata *udata)
 {
-	struct mthca_alloc_ucontext_resp uresp;
-	struct mthca_ucontext *context;
+	struct ib_device *ibdev = uctx->device;
+	struct mthca_alloc_ucontext_resp uresp = {};
+	struct mthca_ucontext *context = to_mucontext(uctx);
 	int err;
 
 	if (!(to_mdev(ibdev)->active))
-		return ERR_PTR(-EAGAIN);
-
-	memset(&uresp, 0, sizeof uresp);
+		return -EAGAIN;
 
 	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
 	if (mthca_is_memfree(to_mdev(ibdev)))
@@ -319,44 +318,33 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
 	else
 		uresp.uarc_size = 0;
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
-	if (err) {
-		kfree(context);
-		return ERR_PTR(err);
-	}
+	if (err)
+		return err;
 
 	context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
 	if (IS_ERR(context->db_tab)) {
 		err = PTR_ERR(context->db_tab);
 		mthca_uar_free(to_mdev(ibdev), &context->uar);
-		kfree(context);
-		return ERR_PTR(err);
+		return err;
 	}
 
-	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
 		mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
 		mthca_uar_free(to_mdev(ibdev), &context->uar);
-		kfree(context);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 
 	context->reg_mr_warned = 0;
 
-	return &context->ibucontext;
+	return 0;
 }
 
-static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+static void mthca_dealloc_ucontext(struct ib_ucontext *context)
 {
 	mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
 				  to_mucontext(context)->db_tab);
 	mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
-	kfree(to_mucontext(context));
-
-	return 0;
 }
 
 static int mthca_mmap_uar(struct ib_ucontext *context,
@@ -1213,6 +1201,7 @@ static const struct ib_device_ops mthca_dev_ops = {
 	.reg_user_mr = mthca_reg_user_mr,
 	.resize_cq = mthca_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops mthca_dev_arbel_srq_ops = {

@@ -529,27 +529,27 @@ static int nes_query_gid(struct ib_device *ibdev, u8 port,
 * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
 * of all objects associated with a particular user-mode client.
 */
-static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
-		struct ib_udata *udata)
+static int nes_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
 	struct nes_device *nesdev = nesvnic->nesdev;
 	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	struct nes_alloc_ucontext_req req;
 	struct nes_alloc_ucontext_resp uresp = {};
-	struct nes_ucontext *nes_ucontext;
+	struct nes_ucontext *nes_ucontext = to_nesucontext(uctx);
 	struct nes_ib_device *nesibdev = nesvnic->nesibdev;
 
 
 	if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
 		printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
 		printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
 			req.userspace_ver, NES_ABI_USERSPACE_VER);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 
@@ -559,10 +559,6 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
 	uresp.virtwq = nesadapter->virtwq;
 	uresp.kernel_ver = NES_ABI_KERNEL_VER;
 
-	nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
-	if (!nes_ucontext)
-		return ERR_PTR(-ENOMEM);
-
 	nes_ucontext->nesdev = nesdev;
 	nes_ucontext->mmap_wq_offset = uresp.max_pds;
 	nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
@@ -570,29 +566,22 @@ static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
 		PAGE_SIZE;
 
 
-	if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
-		kfree(nes_ucontext);
-		return ERR_PTR(-EFAULT);
-	}
+	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
+		return -EFAULT;
 
 	INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
 	INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
-	return &nes_ucontext->ibucontext;
+	return 0;
 }
 
 
 /**
 * nes_dealloc_ucontext
 */
-static int nes_dealloc_ucontext(struct ib_ucontext *context)
+static void nes_dealloc_ucontext(struct ib_ucontext *context)
 {
-	struct nes_ucontext *nes_ucontext = to_nesucontext(context);
-
-	kfree(nes_ucontext);
-	return 0;
+	return;
 }
 
 
 /**
 * nes_mmap
 */
@@ -3599,6 +3588,7 @@ static const struct ib_device_ops nes_dev_ops = {
 	.reg_user_mr = nes_reg_user_mr,
 	.req_notify_cq = nes_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, nes_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, nes_ucontext, ibucontext),
 };
 
 /**

@@ -180,6 +180,7 @@ static const struct ib_device_ops ocrdma_dev_ops = {
 	.req_notify_cq = ocrdma_arm_cq,
 	.resize_cq = ocrdma_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops ocrdma_dev_srq_ops = {

@@ -440,7 +440,7 @@ err:
 	return status;
 }
 
-static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
 	struct ocrdma_pd *pd = uctx->cntxt_pd;
 	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
@@ -451,8 +451,7 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 	}
 	kfree(uctx->cntxt_pd);
 	uctx->cntxt_pd = NULL;
-	(void)_ocrdma_dealloc_pd(dev, pd);
-	return 0;
+	_ocrdma_dealloc_pd(dev, pd);
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -476,33 +475,28 @@ static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
 	mutex_unlock(&uctx->mm_list_lock);
 }
 
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	int status;
-	struct ocrdma_ucontext *ctx;
-	struct ocrdma_alloc_ucontext_resp resp;
+	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+	struct ocrdma_alloc_ucontext_resp resp = {};
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
 
 	if (!udata)
-		return ERR_PTR(-EFAULT);
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -EFAULT;
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
 	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
 					    &ctx->ah_tbl.pa, GFP_KERNEL);
-	if (!ctx->ah_tbl.va) {
-		kfree(ctx);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!ctx->ah_tbl.va)
+		return -ENOMEM;
 
 	ctx->ah_tbl.len = map_len;
 
-	memset(&resp, 0, sizeof(resp));
 	resp.ah_tbl_len = ctx->ah_tbl.len;
 	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
@@ -524,7 +518,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
 	if (status)
 		goto cpy_err;
-	return &ctx->ibucontext;
+	return 0;
 
 cpy_err:
 	ocrdma_dealloc_ucontext_pd(ctx);
@@ -533,19 +527,17 @@ pd_err:
 map_err:
 	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
 			  ctx->ah_tbl.pa);
-	kfree(ctx);
-	return ERR_PTR(status);
+	return status;
 }
 
-int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
-	int status;
 	struct ocrdma_mm *mm, *tmp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
 	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
 	struct pci_dev *pdev = dev->nic_info.pdev;
 
-	status = ocrdma_dealloc_ucontext_pd(uctx);
+	ocrdma_dealloc_ucontext_pd(uctx);
 
 	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
 	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -555,8 +547,6 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 		list_del(&mm->entry);
 		kfree(mm);
 	}
-	kfree(uctx);
-	return status;
 }
 
 int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)

@@ -64,9 +64,8 @@ void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
 struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
 int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
 
-struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *,
-					  struct ib_udata *);
-int ocrdma_dealloc_ucontext(struct ib_ucontext *);
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
 
 int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);

@@ -240,6 +240,7 @@ static const struct ib_device_ops qedr_dev_ops = {
 	.req_notify_cq = qedr_arm_cq,
 	.resize_cq = qedr_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
 };
 
 static int qedr_register_device(struct qedr_dev *dev)

@@ -316,28 +316,24 @@ static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
 	return found;
 }
 
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
-					struct ib_udata *udata)
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	int rc;
-	struct qedr_ucontext *ctx;
-	struct qedr_alloc_ucontext_resp uresp;
+	struct qedr_ucontext *ctx = get_qedr_ucontext(uctx);
+	struct qedr_alloc_ucontext_resp uresp = {};
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
 	struct qed_rdma_add_user_out_params oparams;
 
 	if (!udata)
-		return ERR_PTR(-EFAULT);
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+		return -EFAULT;
 
 	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
 	if (rc) {
 		DP_ERR(dev,
 		       "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
 		       rc);
-		goto err;
+		return rc;
 	}
 
 	ctx->dpi = oparams.dpi;
@@ -347,8 +343,6 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
-	memset(&uresp, 0, sizeof(uresp));
-
 	uresp.dpm_enabled = dev->user_dpm_enabled;
 	uresp.wids_enabled = 1;
 	uresp.wid_count = oparams.wid_count;
@@ -364,28 +358,23 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
 
 	rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (rc)
-		goto err;
+		return rc;
 
 	ctx->dev = dev;
 
 	rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
 	if (rc)
-		goto err;
+		return rc;
 
 	DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
 		 &ctx->ibucontext);
-	return &ctx->ibucontext;
-
-err:
-	kfree(ctx);
-	return ERR_PTR(rc);
+	return 0;
 }
 
-int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+void qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
 	struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
 	struct qedr_mm *mm, *tmp;
-	int status = 0;
 
 	DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
 		 uctx);
@@ -398,9 +387,6 @@ int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
 		list_del(&mm->entry);
 		kfree(mm);
 	}
-
-	kfree(uctx);
-	return status;
 }
 
 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)

@@ -43,8 +43,8 @@ int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
 
 int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
 
-struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
-int qedr_dealloc_ucontext(struct ib_ucontext *);
+int qedr_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void qedr_dealloc_ucontext(struct ib_ucontext *uctx);
 
 int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
 int qedr_alloc_pd(struct ib_pd *pd, struct ib_ucontext *uctx,

@@ -350,6 +350,7 @@ static const struct ib_device_ops usnic_dev_ops = {
 	.query_qp = usnic_ib_query_qp,
 	.reg_user_mr = usnic_ib_reg_mr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
 };
 
 /* Start of PF discovery section */

@@ -653,37 +653,31 @@ int usnic_ib_dereg_mr(struct ib_mr *ibmr)
 	return 0;
 }
 
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
-					    struct ib_udata *udata)
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct usnic_ib_ucontext *context;
+	struct ib_device *ibdev = uctx->device;
+	struct usnic_ib_ucontext *context = to_ucontext(uctx);
 	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
 	usnic_dbg("\n");
 
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-
 	INIT_LIST_HEAD(&context->qp_grp_list);
 	mutex_lock(&us_ibdev->usdev_lock);
 	list_add_tail(&context->link, &us_ibdev->ctx_list);
 	mutex_unlock(&us_ibdev->usdev_lock);
 
-	return &context->ibucontext;
+	return 0;
 }
 
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
 	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
 	usnic_dbg("\n");
 
 	mutex_lock(&us_ibdev->usdev_lock);
-	BUG_ON(!list_empty(&context->qp_grp_list));
+	WARN_ON_ONCE(!list_empty(&context->qp_grp_list));
 	list_del(&context->link);
 	mutex_unlock(&us_ibdev->usdev_lock);
-	kfree(context);
-	return 0;
 }
 
 int usnic_ib_mmap(struct ib_ucontext *context,

@@ -68,9 +68,8 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
 				u64 virt_addr, int access_flags,
 				struct ib_udata *udata);
 int usnic_ib_dereg_mr(struct ib_mr *ibmr);
-struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
-						struct ib_udata *udata);
-int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
+int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext);
 int usnic_ib_mmap(struct ib_ucontext *context,
 			struct vm_area_struct *vma);
 #endif /* !USNIC_IB_VERBS_H */

@@ -196,6 +196,7 @@ static const struct ib_device_ops pvrdma_dev_ops = {
 	.reg_user_mr = pvrdma_reg_user_mr,
 	.req_notify_cq = pvrdma_req_notify_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, pvrdma_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, pvrdma_ucontext, ibucontext),
 };
 
 static const struct ib_device_ops pvrdma_dev_srq_ops = {

@@ -306,41 +306,32 @@ out:
 
 /**
  * pvrdma_alloc_ucontext - allocate ucontext
- * @ibdev: the IB device
+ * @uctx: the uverbs countext
  * @udata: user data
 *
- * @return: the ib_ucontext pointer on success, otherwise errno.
+ * @return: zero on success, otherwise errno.
 */
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata)
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
+	struct ib_device *ibdev = uctx->device;
 	struct pvrdma_dev *vdev = to_vdev(ibdev);
-	struct pvrdma_ucontext *context;
-	union pvrdma_cmd_req req;
-	union pvrdma_cmd_resp rsp;
+	struct pvrdma_ucontext *context = to_vucontext(uctx);
+	union pvrdma_cmd_req req = {};
+	union pvrdma_cmd_resp rsp = {};
 	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
 	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-	struct pvrdma_alloc_ucontext_resp uresp = {0};
+	struct pvrdma_alloc_ucontext_resp uresp = {};
 	int ret;
-	void *ptr;
 
 	if (!vdev->ib_active)
-		return ERR_PTR(-EAGAIN);
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
+		return -EAGAIN;
 
 	context->dev = vdev;
 	ret = pvrdma_uar_alloc(vdev, &context->uar);
-	if (ret) {
-		kfree(context);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ret)
+		return -ENOMEM;
 
 	/* get ctx_handle from host */
-	memset(cmd, 0, sizeof(*cmd));
-
 	if (vdev->dsr_version < PVRDMA_PPN64_VERSION)
 		cmd->pfn = context->uar.pfn;
 	else
@@ -351,7 +342,6 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	if (ret < 0) {
 		dev_warn(&vdev->pdev->dev,
 			 "could not create ucontext, error: %d\n", ret);
-		ptr = ERR_PTR(ret);
 		goto err;
 	}
 
@@ -362,33 +352,28 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (ret) {
 		pvrdma_uar_free(vdev, &context->uar);
-		context->ibucontext.device = ibdev;
 		pvrdma_dealloc_ucontext(&context->ibucontext);
-		return ERR_PTR(-EFAULT);
+		return -EFAULT;
 	}
 
-	return &context->ibucontext;
+	return 0;
 
 err:
 	pvrdma_uar_free(vdev, &context->uar);
-	kfree(context);
-	return ptr;
+	return ret;
 }
 
 /**
  * pvrdma_dealloc_ucontext - deallocate ucontext
  * @ibcontext: the ucontext
- *
- * @return: 0 on success, otherwise errno.
 */
-int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
+void pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct pvrdma_ucontext *context = to_vucontext(ibcontext);
-	union pvrdma_cmd_req req;
+	union pvrdma_cmd_req req = {};
 	struct pvrdma_cmd_destroy_uc *cmd = &req.destroy_uc;
 	int ret;
 
-	memset(cmd, 0, sizeof(*cmd));
 	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_UC;
 	cmd->ctx_handle = context->ctx_handle;
 
@@ -399,9 +384,6 @@ int pvrdma_dealloc_ucontext(struct ib_ucontext *ibcontext)
 
 	/* Free the UAR even if the device command failed */
 	pvrdma_uar_free(to_vdev(ibcontext->device), &context->uar);
-	kfree(context);
-
-	return ret;
 }
 
 /**

@@ -396,9 +396,8 @@ int pvrdma_modify_device(struct ib_device *ibdev, int mask,
 int pvrdma_modify_port(struct ib_device *ibdev, u8 port,
 		       int mask, struct ib_port_modify *props);
 int pvrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
-					  struct ib_udata *udata);
-int pvrdma_dealloc_ucontext(struct ib_ucontext *context);
+int pvrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void pvrdma_dealloc_ucontext(struct ib_ucontext *context);
 int pvrdma_alloc_pd(struct ib_pd *pd, struct ib_ucontext *context,
 		    struct ib_udata *udata);
 void pvrdma_dealloc_pd(struct ib_pd *ibpd);

@@ -292,28 +292,21 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
 
 /**
  * rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Verbs IB dev
+ * @uctx: Verbs context
  * @udata: User data allocated
 */
-static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
-					      struct ib_udata *udata)
+static int rvt_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct rvt_ucontext *context;
-
-	context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (!context)
-		return ERR_PTR(-ENOMEM);
-	return &context->ibucontext;
+	return 0;
 }
 
 /**
- *rvt_dealloc_ucontext - Free a user context
- *@context - Free this
+ * rvt_dealloc_ucontext - Free a user context
+ * @context - Free this
 */
-static int rvt_dealloc_ucontext(struct ib_ucontext *context)
+static void rvt_dealloc_ucontext(struct ib_ucontext *context)
 {
-	kfree(to_iucontext(context));
-	return 0;
+	return;
 }
 
 static int rvt_get_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -433,6 +426,7 @@ static const struct ib_device_ops rvt_dev_ops = {
 	.resize_cq = rvt_resize_cq,
 	.unmap_fmr = rvt_unmap_fmr,
 	INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
 };
 
 static noinline int check_support(struct rvt_dev_info *rdi, int verb)

@@ -42,6 +42,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
 	[RXE_TYPE_UC] = {
 		.name		= "rxe-uc",
 		.size		= sizeof(struct rxe_ucontext),
+		.flags		= RXE_POOL_NO_ALLOC,
 	},
 	[RXE_TYPE_PD] = {
 		.name		= "rxe-pd",

@@ -142,22 +142,19 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
 	return rxe_link_layer(rxe, port_num);
 }
 
-static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
-					      struct ib_udata *udata)
+static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
 {
-	struct rxe_dev *rxe = to_rdev(dev);
-	struct rxe_ucontext *uc;
+	struct rxe_dev *rxe = to_rdev(uctx->device);
+	struct rxe_ucontext *uc = to_ruc(uctx);
 
-	uc = rxe_alloc(&rxe->uc_pool);
-	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
+	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
 }
 
-static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
+static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
 {
 	struct rxe_ucontext *uc = to_ruc(ibuc);
 
 	rxe_drop_ref(uc);
-	return 0;
 }
 
 static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
@@ -1180,6 +1177,7 @@ static const struct ib_device_ops rxe_dev_ops = {
 	.req_notify_cq = rxe_req_notify_cq,
 	.resize_cq = rxe_resize_cq,
 	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 };
 
 int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)

@@ -61,8 +61,8 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
 }
 
 struct rxe_ucontext {
+	struct ib_ucontext	ibuc;
 	struct rxe_pool_entry	pelem;
-	struct ib_ucontext	ibuc;
 };
 
 struct rxe_pd {

@@ -2389,9 +2389,9 @@ struct ib_device_ops {
 	int (*del_gid)(const struct ib_gid_attr *attr, void **context);
 	int (*query_pkey)(struct ib_device *device, u8 port_num, u16 index,
 			  u16 *pkey);
-	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
-					      struct ib_udata *udata);
-	int (*dealloc_ucontext)(struct ib_ucontext *context);
+	int (*alloc_ucontext)(struct ib_ucontext *context,
+			      struct ib_udata *udata);
+	void (*dealloc_ucontext)(struct ib_ucontext *context);
 	int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
 	void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
 	int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
@@ -2551,6 +2551,7 @@ struct ib_device_ops {
 	void (*dealloc_driver)(struct ib_device *dev);
 
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
+	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 };
 
 struct rdma_restrack_root;
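For context on how the core sizes these allocations: the DECLARE_RDMA_OBJ_SIZE()/INIT_RDMA_OBJ_SIZE() pair above records the driver structure size in struct ib_device_ops, and rdma_zalloc_drv_obj() (used in ib_uverbs_get_context() above) allocates that many bytes. The sketch below is a simplification, not the exact macro bodies; the authoritative definitions in include/rdma/ib_verbs.h also add compile-time checks.

/* Simplified sketch (assumption: compile-time checks omitted); see
 * include/rdma/ib_verbs.h for the authoritative definitions.
 */
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct

/* Designated initializer used inside an ib_device_ops table; records
 * sizeof(drv_struct), whose 'member' must be the embedded struct
 * ib_struct placed at offset 0.
 */
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
	.size_##ib_struct = sizeof(struct drv_struct)

/* Core-side allocation of the driver object by the recorded size. */
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
	((struct ib_type *)kzalloc((ib_dev)->ops.size_##ib_type, GFP_KERNEL))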