RDMA: Convert destroy_wq to be void
All destroy_wq callbacks always succeed, so there is no need for callers to check their return value; convert destroy_wq to be void.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit a49b1dc7ae
parent 8d18ad83f1
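The same conversion pattern is applied in every driver touched below; a minimal sketch of the before/after contract, using a hypothetical foo_ driver prefix (only the destroy_wq prototypes come from this patch):

#include <rdma/ib_verbs.h>

/* Before: the op returned an int that the ib core checked, even though
 * every in-tree implementation always returned 0. */
static int foo_destroy_wq_old(struct ib_wq *ibwq, struct ib_udata *udata)
{
	/* ... tear down the hardware WQ and free driver-private state ... */
	return 0;
}

/* After: destroy cannot fail, so the op returns void and the core no
 * longer has an error path to handle. */
static void foo_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
	/* ... tear down the hardware WQ and free driver-private state ... */
}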
drivers/infiniband/core/verbs.c
@@ -2235,19 +2235,17 @@ EXPORT_SYMBOL(ib_create_wq);
  */
 int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
-	int err;
 	struct ib_cq *cq = wq->cq;
 	struct ib_pd *pd = wq->pd;
 
 	if (atomic_read(&wq->usecnt))
 		return -EBUSY;
 
-	err = wq->device->ops.destroy_wq(wq, udata);
-	if (!err) {
-		atomic_dec(&pd->usecnt);
-		atomic_dec(&cq->usecnt);
-	}
-	return err;
+	wq->device->ops.destroy_wq(wq, udata);
+	atomic_dec(&pd->usecnt);
+	atomic_dec(&cq->usecnt);
+
+	return 0;
 }
 EXPORT_SYMBOL(ib_destroy_wq);
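Note that the kernel-facing ib_destroy_wq() keeps its int return, since it can still fail with -EBUSY while the WQ is referenced (e.g. by an RWQ indirection table); only the driver callback becomes void. A minimal caller-side sketch, assuming a kernel consumer with no user data (foo_teardown_wq is hypothetical):

#include <rdma/ib_verbs.h>

/* Hypothetical helper: existing callers are unchanged by this patch. */
static int foo_teardown_wq(struct ib_wq *wq)
{
	/* Still returns -EBUSY if wq->usecnt is non-zero. */
	return ib_destroy_wq(wq, NULL);	/* NULL udata: kernel caller */
}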
drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -906,7 +906,7 @@ void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
 struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);

drivers/infiniband/hw/mlx4/qp.c
@@ -4248,7 +4248,7 @@ int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr,
 	return err;
 }
 
-int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
+void mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
 	struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
@@ -4259,8 +4259,6 @@ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
 	destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
 
 	kfree(qp);
-
-	return 0;
 }
 
 struct ib_rwq_ind_table
drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1201,7 +1201,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		      u32 wq_attr_mask, struct ib_udata *udata);
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,

drivers/infiniband/hw/mlx5/qp.c
@@ -6047,7 +6047,7 @@ err:
 	return ERR_PTR(err);
 }
 
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
@@ -6055,8 +6055,6 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 	destroy_user_rq(dev, wq->pd, rwq, udata);
 	kfree(rwq);
-
-	return 0;
 }
 
 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
include/rdma/ib_verbs.h
@@ -2509,7 +2509,7 @@ struct ib_device_ops {
 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
 				   struct ib_wq_init_attr *init_attr,
 				   struct ib_udata *udata);
-	int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
+	void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
 	int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
 			 u32 wq_attr_mask, struct ib_udata *udata);
 	struct ib_rwq_ind_table *(*create_rwq_ind_table)(
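For completeness, drivers publish the callback through their ib_device_ops table at registration time; a rough sketch under the new prototype, with all foo_* names hypothetical:

#include <rdma/ib_verbs.h>

static void foo_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
{
	/* Driver-specific teardown; nothing here is allowed to fail. */
}

static const struct ib_device_ops foo_dev_ops = {
	.destroy_wq = foo_destroy_wq,	/* void prototype after this patch */
};

static void foo_register_ops(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &foo_dev_ops);
}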