RDMA: Clean MW allocation and free flows

Move allocation and destruction of memory windows under ib_core
responsibility and clean up the drivers to ensure that no updates to
the MW ib_core structures are done in the driver layer.

Link: https://lore.kernel.org/r/20200902081623.746359-2-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Author:    Leon Romanovsky <leonro@mellanox.com>, 2020-09-02 11:16:22 +03:00
Committer: Jason Gunthorpe <jgg@nvidia.com>
Commit:    d18bb3e152 (parent: b5de0c60cc)
16 changed files with 78 additions and 106 deletions
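In short, ownership of the ib_mw object moves into ib_core: the core
allocates the structure (sized through the driver's INIT_RDMA_OBJ_SIZE()
entry), fills the common fields, calls the driver's alloc_mw hook, and
frees the object itself on failure. A condensed sketch of the new flow,
assembled from the uverbs_cmd.c hunk below (error unwinding trimmed):

	struct ib_mw *mw;
	int ret;

	mw = rdma_zalloc_drv_obj(ib_dev, ib_mw); /* core allocates driver-sized object */
	if (!mw)
		return -ENOMEM;

	mw->device  = ib_dev;	/* the ib_core fields are set by the core ... */
	mw->pd      = pd;
	mw->uobject = uobj;
	mw->type    = cmd.mw_type;

	ret = pd->device->ops.alloc_mw(mw, udata); /* ... the driver only programs HW */
	if (ret)
		kfree(mw);	/* and the core, not the driver, frees it */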

drivers/infiniband/core/device.c

@@ -2695,6 +2695,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
 	SET_OBJ_SIZE(dev_ops, ib_ah);
 	SET_OBJ_SIZE(dev_ops, ib_counters);
 	SET_OBJ_SIZE(dev_ops, ib_cq);
+	SET_OBJ_SIZE(dev_ops, ib_mw);
 	SET_OBJ_SIZE(dev_ops, ib_pd);
 	SET_OBJ_SIZE(dev_ops, ib_srq);
 	SET_OBJ_SIZE(dev_ops, ib_ucontext);

drivers/infiniband/core/uverbs_cmd.c

@@ -858,7 +858,7 @@ static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs)
 static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 {
 	struct ib_uverbs_alloc_mw cmd;
-	struct ib_uverbs_alloc_mw_resp resp;
+	struct ib_uverbs_alloc_mw_resp resp = {};
 	struct ib_uobject *uobj;
 	struct ib_pd *pd;
 	struct ib_mw *mw;
@@ -884,15 +884,21 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 		goto err_put;
 	}
 
-	mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata);
-	if (IS_ERR(mw)) {
-		ret = PTR_ERR(mw);
+	mw = rdma_zalloc_drv_obj(ib_dev, ib_mw);
+	if (!mw) {
+		ret = -ENOMEM;
 		goto err_put;
 	}
 
-	mw->device = pd->device;
+	mw->device = ib_dev;
 	mw->pd = pd;
 	mw->uobject = uobj;
 	mw->type = cmd.mw_type;
+
+	ret = pd->device->ops.alloc_mw(mw, &attrs->driver_udata);
+	if (ret)
+		goto err_alloc;
+
 	atomic_inc(&pd->usecnt);
 
 	uobj->object = mw;
@@ -903,6 +909,8 @@ static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs)
 	resp.mw_handle = uobj->id;
 	return uverbs_response(attrs, &resp, sizeof(resp));
 
+err_alloc:
+	kfree(mw);
 err_put:
 	uobj_put_obj_read(pd);
 err_free:

drivers/infiniband/core/uverbs_main.c

@@ -108,8 +108,11 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
 	int ret;
 
 	ret = mw->device->ops.dealloc_mw(mw);
-	if (!ret)
-		atomic_dec(&pd->usecnt);
+	if (ret)
+		return ret;
+
+	atomic_dec(&pd->usecnt);
+	kfree(mw);
 	return ret;
 }
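The destroy side follows the same contract: uverbs_dealloc_mw() performs
the kfree() once the driver op succeeds, so a driver's dealloc_mw hook
tears down hardware state only. A hypothetical driver hook under the new
contract (the foo_* names are illustrative, not from this patch):

	static int foo_dealloc_mw(struct ib_mw *ibmw)
	{
		struct foo_mw *mw = container_of(ibmw, struct foo_mw, ibmw);

		foo_destroy_hw_window(mw);	/* assumed hardware teardown */
		return 0;			/* no kfree() here: ib_core frees the object */
	}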

drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -985,8 +985,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 		   unsigned int *sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
 void c4iw_dealloc(struct uld_ctx *ctx);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			    struct ib_udata *udata);
+int c4iw_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 			       u64 length, u64 virt, int acc,
 			       struct ib_udata *udata);

drivers/infiniband/hw/cxgb4/mem.c

@@ -611,30 +611,23 @@ err_free_mhp:
 	return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			    struct ib_udata *udata)
+int c4iw_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
+	struct c4iw_mw *mhp = to_c4iw_mw(ibmw);
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
-	struct c4iw_mw *mhp;
 	u32 mmid;
 	u32 stag = 0;
 	int ret;
 
-	if (type != IB_MW_TYPE_1)
-		return ERR_PTR(-EINVAL);
+	if (ibmw->type != IB_MW_TYPE_1)
+		return -EINVAL;
 
-	php = to_c4iw_pd(pd);
+	php = to_c4iw_pd(ibmw->pd);
 	rhp = php->rhp;
-	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
-	if (!mhp)
-		return ERR_PTR(-ENOMEM);
+
 	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-	if (!mhp->wr_waitp) {
-		ret = -ENOMEM;
-		goto free_mhp;
-	}
+	if (!mhp->wr_waitp)
+		return -ENOMEM;
 
 	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
 	if (!mhp->dereg_skb) {
@@ -645,18 +638,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
 	if (ret)
 		goto free_skb;
+
 	mhp->rhp = rhp;
 	mhp->attr.pdid = php->pdid;
 	mhp->attr.type = FW_RI_STAG_MW;
 	mhp->attr.stag = stag;
 	mmid = (stag) >> 8;
-	mhp->ibmw.rkey = stag;
+	ibmw->rkey = stag;
 	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
 	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
-	return &(mhp->ibmw);
+	return 0;
 
 dealloc_win:
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
@@ -665,9 +659,7 @@ free_skb:
 	kfree_skb(mhp->dereg_skb);
 free_wr_wait:
 	c4iw_put_wr_wait(mhp->wr_waitp);
-free_mhp:
-	kfree(mhp);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int c4iw_dealloc_mw(struct ib_mw *mw)
@@ -684,8 +676,6 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 		    mhp->wr_waitp);
 	kfree_skb(mhp->dereg_skb);
 	c4iw_put_wr_wait(mhp->wr_waitp);
-	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
-	kfree(mhp);
 	return 0;
 }

drivers/infiniband/hw/cxgb4/provider.c

@@ -498,8 +498,10 @@ static const struct ib_device_ops c4iw_dev_ops = {
 	.query_qp = c4iw_ib_query_qp,
 	.reg_user_mr = c4iw_reg_user_mr,
 	.req_notify_cq = c4iw_arm_cq,
-	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+
 	INIT_RDMA_OBJ_SIZE(ib_cq, c4iw_cq, ibcq),
+	INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw),
+	INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
 	INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
 	INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
 };
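The INIT_RDMA_OBJ_SIZE() entries are what make rdma_zalloc_drv_obj()
work: each driver embeds struct ib_mw in its own MW structure and
registers the size and offset, so the core can allocate the complete
driver object in one shot. Roughly, for the cxgb4 case above (a sketch;
the real struct c4iw_mw carries additional hardware state):

	struct c4iw_mw {
		struct ib_mw ibmw;	/* embedded ib_core object */
		/* ... driver/hardware specific fields ... */
	};

	/* INIT_RDMA_OBJ_SIZE(ib_mw, c4iw_mw, ibmw) records sizeof(struct c4iw_mw)
	 * and the offset of ibmw; the to_c4iw_mw() helper used in mem.c is then
	 * just container_of(): */
	static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
	{
		return container_of(ibmw, struct c4iw_mw, ibmw);
	}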

drivers/infiniband/hw/hns/hns_roce_device.h

@@ -1204,8 +1204,7 @@ int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
 			    unsigned long mpt_index);
 unsigned long key_to_hw_index(u32 key);
 
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
-				struct ib_udata *udata);
+int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int hns_roce_dealloc_mw(struct ib_mw *ibmw);
 
 void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);

drivers/infiniband/hw/hns/hns_roce_main.c

@@ -454,6 +454,8 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
 static const struct ib_device_ops hns_roce_dev_mw_ops = {
 	.alloc_mw = hns_roce_alloc_mw,
 	.dealloc_mw = hns_roce_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
 };
 
 static const struct ib_device_ops hns_roce_dev_frmr_ops = {

drivers/infiniband/hw/hns/hns_roce_mr.c

@@ -589,28 +589,22 @@ err_table:
 	return ret;
 }
 
-struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
-				struct ib_udata *udata)
+int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
-	struct hns_roce_mw *mw;
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+	struct hns_roce_mw *mw = to_hr_mw(ibmw);
 	unsigned long index = 0;
 	int ret;
 
-	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
-	if (!mw)
-		return ERR_PTR(-ENOMEM);
-
 	/* Allocate a key for mw from bitmap */
 	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
 	if (ret)
-		goto err_bitmap;
+		return ret;
 
 	mw->rkey = hw_index_to_key(index);
-	mw->ibmw.rkey = mw->rkey;
-	mw->ibmw.type = type;
-	mw->pdn = to_hr_pd(ib_pd)->pdn;
+	ibmw->rkey = mw->rkey;
+	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
 	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
 	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
 	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
@@ -619,15 +613,11 @@ struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
 	if (ret)
 		goto err_mw;
 
-	return &mw->ibmw;
+	return 0;
 
 err_mw:
 	hns_roce_mw_free(hr_dev, mw);
-
-err_bitmap:
-	kfree(mw);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 int hns_roce_dealloc_mw(struct ib_mw *ibmw)
@@ -636,8 +626,6 @@ int hns_roce_dealloc_mw(struct ib_mw *ibmw)
 	struct hns_roce_mw *mw = to_hr_mw(ibmw);
 
 	hns_roce_mw_free(hr_dev, mw);
-	kfree(mw);
-
 	return 0;
 }

drivers/infiniband/hw/mlx4/main.c

@@ -2582,6 +2582,8 @@ static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
 	.alloc_mw = mlx4_ib_alloc_mw,
 	.dealloc_mw = mlx4_ib_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
 };
 
 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {

drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -731,8 +731,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata);
+int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg);

drivers/infiniband/hw/mlx4/mr.c

@@ -611,37 +611,27 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata)
+int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-	struct mlx4_ib_dev *dev = to_mdev(pd->device);
-	struct mlx4_ib_mw *mw;
+	struct mlx4_ib_dev *dev = to_mdev(ibmw->device);
+	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 	int err;
 
-	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
-	if (!mw)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
-			    to_mlx4_type(type), &mw->mmw);
+	err = mlx4_mw_alloc(dev->dev, to_mpd(ibmw->pd)->pdn,
+			    to_mlx4_type(ibmw->type), &mw->mmw);
 	if (err)
-		goto err_free;
+		return err;
 
 	err = mlx4_mw_enable(dev->dev, &mw->mmw);
 	if (err)
 		goto err_mw;
 
-	mw->ibmw.rkey = mw->mmw.key;
-
-	return &mw->ibmw;
+	ibmw->rkey = mw->mmw.key;
+	return 0;
 
 err_mw:
 	mlx4_mw_free(dev->dev, &mw->mmw);
-
-err_free:
-	kfree(mw);
-
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
@@ -649,8 +639,6 @@ int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
 	struct mlx4_ib_mw *mw = to_mmw(ibmw);
 
 	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
-
-	kfree(mw);
 	return 0;
 }

drivers/infiniband/hw/mlx5/main.c

@@ -4098,6 +4098,8 @@ static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
 	.alloc_mw = mlx5_ib_alloc_mw,
 	.dealloc_mw = mlx5_ib_dealloc_mw,
+
+	INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
 };
 
 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -1166,8 +1166,7 @@ int mlx5_ib_advise_mr(struct ib_pd *pd,
 		      struct ib_sge *sg_list,
 		      u32 num_sge,
 		      struct uverbs_attr_bundle *attrs);
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
 int mlx5_ib_dealloc_mw(struct ib_mw *mw);
 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		       int page_shift, int flags);

drivers/infiniband/hw/mlx5/mr.c

@@ -1973,12 +1973,11 @@ struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
 				  max_num_meta_sg);
 }
 
-struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
-			       struct ib_udata *udata)
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 {
-	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
 	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-	struct mlx5_ib_mw *mw = NULL;
+	struct mlx5_ib_mw *mw = to_mmw(ibmw);
 	u32 *in = NULL;
 	void *mkc;
 	int ndescs;
@@ -1991,21 +1990,20 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
 	if (req.comp_mask || req.reserved1 || req.reserved2)
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	if (udata->inlen > sizeof(req) &&
 	    !ib_is_udata_cleared(udata, sizeof(req),
 				 udata->inlen - sizeof(req)))
-		return ERR_PTR(-EOPNOTSUPP);
+		return -EOPNOTSUPP;
 
 	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
 
-	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
 	in = kzalloc(inlen, GFP_KERNEL);
-	if (!mw || !in) {
+	if (!in) {
 		err = -ENOMEM;
 		goto free;
 	}
@@ -2014,11 +2012,11 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	MLX5_SET(mkc, mkc, free, 1);
 	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
-	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 	MLX5_SET(mkc, mkc, lr, 1);
 	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
-	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
+	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 
 	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
@@ -2026,17 +2024,15 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 		goto free;
 
 	mw->mmkey.type = MLX5_MKEY_MW;
-	mw->ibmw.rkey = mw->mmkey.key;
+	ibmw->rkey = mw->mmkey.key;
 	mw->ndescs = ndescs;
 
 	resp.response_length =
 		min(offsetofend(typeof(resp), response_length), udata->outlen);
 	if (resp.response_length) {
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
-		if (err) {
-			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
-			goto free;
-		}
+		if (err)
+			goto free_mkey;
 	}
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
@@ -2048,21 +2044,19 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 	}
 
 	kfree(in);
-	return &mw->ibmw;
+	return 0;
 
+free_mkey:
+	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
 free:
-	kfree(mw);
 	kfree(in);
-	return ERR_PTR(err);
+	return err;
 }
 
 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 {
 	struct mlx5_ib_dev *dev = to_mdev(mw->device);
 	struct mlx5_ib_mw *mmw = to_mmw(mw);
-	int err;
 
 	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
 		xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
@@ -2073,11 +2067,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 		synchronize_srcu(&dev->odp_srcu);
 	}
 
-	err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
-	if (err)
-		return err;
-	kfree(mmw);
-	return 0;
+	return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
 }
 
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,

include/rdma/ib_verbs.h

@@ -2448,8 +2448,7 @@ struct ib_device_ops {
 			 unsigned int *sg_offset);
 	int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
 			       struct ib_mr_status *mr_status);
-	struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
-				  struct ib_udata *udata);
+	int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
 	int (*dealloc_mw)(struct ib_mw *mw);
 	int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 	int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
@@ -2610,6 +2609,7 @@ struct ib_device_ops {
 	DECLARE_RDMA_OBJ_SIZE(ib_ah);
 	DECLARE_RDMA_OBJ_SIZE(ib_counters);
 	DECLARE_RDMA_OBJ_SIZE(ib_cq);
+	DECLARE_RDMA_OBJ_SIZE(ib_mw);
 	DECLARE_RDMA_OBJ_SIZE(ib_pd);
 	DECLARE_RDMA_OBJ_SIZE(ib_srq);
 	DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
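With the op reworked to int (*alloc_mw)(struct ib_mw *, struct ib_udata *),
a minimal driver implementation reduces to the pattern below (hypothetical
foo_* names again; compare the cxgb4, hns and mlx4 conversions above):

	static int foo_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
	{
		struct foo_mw *mw = container_of(ibmw, struct foo_mw, ibmw);
		int err;

		/* ibmw->device, ibmw->pd and ibmw->type were already set by ib_core */
		err = foo_create_hw_window(mw, ibmw->type);
		if (err)
			return err;	/* ib_core kfrees the half-initialized object */

		ibmw->rkey = mw->hw_rkey;	/* publish the new rkey */
		return 0;
	}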