Mirror of https://github.com/torvalds/linux.git (synced 2024-11-19 02:21:47 +00:00)
RDMA: Clean destroy CQ in drivers do not return errors
Like all other destroy commands, the .destroy_cq() callback is not supposed to fail. In all flows, attempting to return early on error caused memory leaks. This patch converts .destroy_cq() so that it does not return any errors.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Acked-by: Gal Pressman <galpress@amazon.com>
Acked-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent 147b308e6a
commit a52c8e2469
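The change this commit applies to every driver follows one pattern: drop the error return from the destroy path and release the software object unconditionally, because a CQ destroy that "fails" leaves behind an object no caller can ever free. A minimal before/after sketch with a hypothetical foo driver (illustrative only, not code from this commit):

	/* Before: an early return on a failed HW command leaks foo_cq. */
	static int foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
	{
		struct foo_cq *cq = to_foo_cq(ibcq);	/* hypothetical helpers */
		int rc;

		rc = foo_hw_destroy_cq(cq);
		if (rc)
			return rc;	/* nobody retries; cq is never freed */
		kfree(cq);
		return 0;
	}

	/* After: teardown cannot fail, so the object is always released. */
	static void foo_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
	{
		struct foo_cq *cq = to_foo_cq(ibcq);

		foo_hw_destroy_cq(cq);	/* best effort; result intentionally ignored */
		kfree(cq);
	}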
@@ -207,8 +206,6 @@ EXPORT_SYMBOL(__ib_alloc_cq_user);
  */
 void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 {
-	int ret;
-
 	if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
 		return;
 
@@ -228,7 +226,6 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 
 	kfree(cq->wc);
 	rdma_restrack_del(&cq->res);
-	ret = cq->device->ops.destroy_cq(cq, udata);
-	WARN_ON_ONCE(ret);
+	cq->device->ops.destroy_cq(cq, udata);
 }
 EXPORT_SYMBOL(ib_free_cq_user);
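For consumers of the in-kernel CQ API nothing changes here: ib_free_cq() already returned void, and after this hunk the core no longer even warns on a driver failure, because there can be none. A typical allocate/free pairing in a ULP, as an illustrative sketch (the CQ size and poll context are example values, not from this commit):

	struct ib_cq *cq;

	cq = ib_alloc_cq(ibdev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... post work requests, consume completions ... */

	ib_free_cq(cq);		/* void: teardown is unconditional */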
@@ -1949,7 +1949,8 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
 		return -EBUSY;
 
 	rdma_restrack_del(&cq->res);
-	return cq->device->ops.destroy_cq(cq, udata);
+	cq->device->ops.destroy_cq(cq, udata);
+	return 0;
 }
 EXPORT_SYMBOL(ib_destroy_cq_user);
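Note the asymmetry this hunk leaves in place: ib_destroy_cq_user() still returns int, because destroying a CQ that is still referenced legitimately fails with -EBUSY before the driver is ever invoked; only the driver callback after that check became infallible. The single error a caller can still see is the busy case, e.g. (illustrative):

	int ret = ib_destroy_cq(cq);	/* thin wrapper around ib_destroy_cq_user(cq, NULL) */

	WARN_ON(ret);	/* only -EBUSY remains possible, while cq->usecnt != 0 */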
@@ -2517,9 +2517,8 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
 }
 
 /* Completion Queues */
-int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
-	int rc;
 	struct bnxt_re_cq *cq;
 	struct bnxt_qplib_nq *nq;
 	struct bnxt_re_dev *rdev;
@@ -2528,20 +2527,14 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	rdev = cq->rdev;
 	nq = cq->qplib_cq.nq;
 
-	rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
-		return rc;
-	}
-	if (!IS_ERR_OR_NULL(cq->umem))
+	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+	if (!cq->umem)
 		ib_umem_release(cq->umem);
 
 	atomic_dec(&rdev->cq_count);
 	nq->budget--;
 	kfree(cq->cql);
 	kfree(cq);
-
-	return 0;
 }
 
 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
@@ -193,7 +193,7 @@ int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
 struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_udata *udata);
-int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
 int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
@@ -303,17 +303,15 @@ err1:
 	return -ENOMEM;
 }
 
-int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
-	int err;
-	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
+	cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
 			  (1UL << (cq->size_log2))
 			  * sizeof(struct t3_cqe) + 1, cq->queue,
 			  dma_unmap_addr(cq, mapping));
 	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
-	return err;
 }
 
 int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
@@ -158,7 +158,7 @@ void cxio_rdev_close(struct cxio_rdev *rdev);
 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
 		   enum t3_cq_opcode op, u32 credit);
 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
-int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
+void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
 void cxio_init_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
 int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
@@ -88,7 +88,7 @@ static int iwch_alloc_ucontext(struct ib_ucontext *ucontext,
 	return 0;
 }
 
-static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+static void iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct iwch_cq *chp;
 
@@ -101,7 +101,6 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 
 	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
 	kfree(chp);
-	return 0;
 }
 
 static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
@@ -34,14 +34,13 @@
 
 #include "iw_cxgb4.h"
 
-static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
-		      struct c4iw_wr_wait *wr_waitp)
+static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
+		       struct c4iw_wr_wait *wr_waitp)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
-	int ret;
 
 	wr_len = sizeof(*res_wr) + sizeof(*res);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -59,14 +58,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	res->u.cq.iqid = cpu_to_be32(cq->cqid);
 
 	c4iw_init_wr_wait(wr_waitp);
-	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
+	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
 
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
 			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
-	return ret;
 }
 
 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
@@ -970,7 +968,7 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	return !err || err == -ENODATA ? npolled : err;
 }
 
-int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct c4iw_cq *chp;
 	struct c4iw_ucontext *ucontext;
@@ -989,7 +987,6 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 		   chp->destroy_skb, chp->wr_waitp);
 	c4iw_put_wr_wait(chp->wr_waitp);
 	kfree(chp);
-	return 0;
 }
 
 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
@@ -992,7 +992,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
 			       struct ib_udata *udata);
 struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
 int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
-int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
 			     struct ib_udata *udata);
@@ -134,7 +134,7 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
 			    struct ib_qp_init_attr *init_attr,
 			    struct ib_udata *udata);
-int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 struct ib_cq *efa_create_cq(struct ib_device *ibdev,
 			    const struct ib_cq_init_attr *attr,
 			    struct ib_udata *udata);
@@ -847,25 +847,20 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
 	return efa_com_destroy_cq(&dev->edev, &params);
 }
 
-int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct efa_dev *dev = to_edev(ibcq->device);
 	struct efa_cq *cq = to_ecq(ibcq);
-	int err;
 
 	ibdev_dbg(&dev->ibdev,
 		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
 		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
 
-	err = efa_destroy_cq_idx(dev, cq->cq_idx);
-	if (err)
-		return err;
-
+	efa_destroy_cq_idx(dev, cq->cq_idx);
 	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
 			 DMA_FROM_DEVICE);
 
 	kfree(cq);
-	return 0;
 }
 
 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
@@ -443,40 +443,36 @@ err_cq:
 }
 EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
 
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
-	int ret = 0;
 
 	if (hr_dev->hw->destroy_cq) {
-		ret = hr_dev->hw->destroy_cq(ib_cq, udata);
-	} else {
-		hns_roce_free_cq(hr_dev, hr_cq);
-		hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
-
-		if (udata) {
-			ib_umem_release(hr_cq->umem);
-
-			if (hr_cq->db_en == 1)
-				hns_roce_db_unmap_user(
-					rdma_udata_to_drv_context(
-						udata,
-						struct hns_roce_ucontext,
-						ibucontext),
-					&hr_cq->db);
-		} else {
-			/* Free the buff of stored cq */
-			hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
-						ib_cq->cqe);
-			if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
-				hns_roce_free_db(hr_dev, &hr_cq->db);
-		}
-
-		kfree(hr_cq);
+		hr_dev->hw->destroy_cq(ib_cq, udata);
+		return;
 	}
 
-	return ret;
+	hns_roce_free_cq(hr_dev, hr_cq);
+	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
+
+	if (udata) {
+		ib_umem_release(hr_cq->umem);
+
+		if (hr_cq->db_en == 1)
+			hns_roce_db_unmap_user(rdma_udata_to_drv_context(
+						       udata,
+						       struct hns_roce_ucontext,
+						       ibucontext),
+					       &hr_cq->db);
+	} else {
+		/* Free the buff of stored cq */
+		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
+		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+			hns_roce_free_db(hr_dev, &hr_cq->db);
+	}
+
+	kfree(hr_cq);
 }
 EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
@@ -938,7 +938,7 @@ struct hns_roce_hw {
 	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
 			struct ib_udata *udata);
-	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
+	void (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
 	int (*init_eq)(struct hns_roce_dev *hr_dev);
 	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
@@ -1209,7 +1209,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
 				    const struct ib_cq_init_attr *attr,
 				    struct ib_udata *udata);
 
-int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void hns_roce_ib_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 
 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
@@ -865,8 +865,7 @@ alloc_pd_failed:
 	kfree(pd);
 
 alloc_mem_failed:
-	if (hns_roce_ib_destroy_cq(cq, NULL))
-		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
+	hns_roce_ib_destroy_cq(cq, NULL);
 
 	return ret;
 }
@@ -894,10 +893,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 				i, ret);
 	}
 
-	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
-	if (ret)
-		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
-
+	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
 	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
 }
 
@@ -3654,7 +3650,7 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	return 0;
 }
 
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
@@ -3663,7 +3659,6 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	u32 cqe_cnt_cur;
 	u32 cq_buf_size;
 	int wait_time = 0;
-	int ret = 0;
 
 	hns_roce_free_cq(hr_dev, hr_cq);
 
@@ -3685,7 +3680,6 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
 			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
 				 hr_cq->cqn);
-			ret = -ETIMEDOUT;
 			break;
 		}
 		wait_time++;
@@ -3702,8 +3696,6 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	}
 
 	kfree(hr_cq);
-
-	return ret;
 }
 
 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
@@ -1064,7 +1064,7 @@ void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
  * @ib_cq: cq pointer
  * @udata: user data or NULL for kernel object
  */
-static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct i40iw_cq *iwcq;
 	struct i40iw_device *iwdev;
@@ -1077,7 +1077,6 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	cq_free_resources(iwdev, iwcq);
 	kfree(iwcq);
 	i40iw_rem_devusecount(iwdev);
-	return 0;
 }
 
 /**
@@ -486,7 +486,7 @@ out:
 	return err;
 }
 
-int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(cq->device);
 	struct mlx4_ib_cq *mcq = to_mcq(cq);
@@ -508,8 +508,6 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 	}
 
 	kfree(mcq);
-
-	return 0;
 }
 
 static void dump_cqe(void *cqe)
@@ -746,7 +746,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
 struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_udata *udata);
-int mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
@@ -998,7 +998,7 @@ err_create:
 	return ERR_PTR(err);
 }
 
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
 	struct mlx5_ib_cq *mcq = to_mcq(cq);
@@ -1010,8 +1010,6 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 		destroy_cq_kernel(dev, mcq);
 
 	kfree(mcq);
-
-	return 0;
 }
 
 static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
@@ -1118,7 +1118,7 @@ int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@ -804,7 +804,7 @@ out:
 	return ret;
 }
 
-static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	if (udata) {
 		struct mthca_ucontext *context =
@@ -824,8 +824,6 @@ static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 	}
 	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
 	kfree(cq);
-
-	return 0;
 }
 
 static inline u32 convert_access(int acc)
@@ -588,9 +588,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
 		cqp_request->callback = 0;
 		nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list \n",
 			  cqp_request);
-	} else
-		printk(KERN_ERR PFX "%s: Could not allocated a CQP request.\n",
-			__func__);
+	}
 
 	return cqp_request;
 }
@@ -1634,7 +1634,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev,
 /**
  * nes_destroy_cq
  */
-static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+static void nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 {
 	struct nes_cq *nescq;
 	struct nes_device *nesdev;
@@ -1644,7 +1644,6 @@ static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	struct nes_cqp_request cqp_request = {};
 	unsigned long flags;
 	u32 opcode = 0;
-	int ret;
 
 	nescq = to_nescq(ib_cq);
 	nesvnic = to_nesvnic(ib_cq->device);
@@ -1656,6 +1655,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	/* Send DestroyCQ request to CQP */
 	INIT_LIST_HEAD(&cqp_request.list);
 	init_waitqueue_head(&cqp_request.waitq);
 
+	cqp_request.waiting = 1;
 	cqp_wqe = &cqp_request.cqp_wqe;
 	opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16);
@@ -1689,30 +1689,18 @@ static int nes_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
 	/* Wait for CQP */
 	nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
 		  nescq->hw_cq.cq_number);
-	ret = wait_event_timeout(cqp_request.waitq, cqp_request.request_done,
-				 NES_EVENT_TIMEOUT);
-	nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
-		  " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
-		  nescq->hw_cq.cq_number, ret, cqp_request.major_code,
-		  cqp_request.minor_code);
-	if (!ret) {
-		nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
-			  nescq->hw_cq.cq_number);
-		ret = -ETIME;
-	} else if (cqp_request.major_code) {
-		nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
-			  nescq->hw_cq.cq_number);
-		ret = -EIO;
-	} else {
-		ret = 0;
-	}
+	wait_event_timeout(cqp_request.waitq, cqp_request.request_done,
+			   NES_EVENT_TIMEOUT);
+	nes_debug(
+		NES_DBG_CQ,
+		"Destroy iWARP CQ%u completed CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+		nescq->hw_cq.cq_number, cqp_request.major_code,
+		cqp_request.minor_code);
 
 	if (nescq->cq_mem_size)
 		pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
 				    nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
 	kfree(nescq);
-
-	return ret;
 }
 
 /**
@@ -1888,14 +1888,13 @@ mem_err:
 	return status;
 }
 
-int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
 {
-	int status = -ENOMEM;
 	struct ocrdma_destroy_cq *cmd;
 
 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
 	if (!cmd)
-		return status;
+		return;
 	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
 
@@ -1903,11 +1902,10 @@ int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
 			(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
 			OCRDMA_DESTROY_CQ_QID_MASK;
 
-	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
 	ocrdma_unbind_eq(dev, cq->eqn);
 	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
 	kfree(cmd);
-	return status;
 }
 
 int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
@@ -122,7 +122,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
 		  u32 pd_id, int acc);
 int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
 			 int entries, int dpp_cq, u16 pd_id);
-int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq);
 
 int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
 			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
@@ -1070,7 +1070,7 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 	spin_unlock_irqrestore(&cq->cq_lock, flags);
 }
 
-int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 	struct ocrdma_eq *eq = NULL;
@@ -1080,14 +1080,13 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 
 	dev->cq_tbl[cq->id] = NULL;
 	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
-	BUG_ON(indx == -EINVAL);
 
 	eq = &dev->eq_tbl[indx];
 	irq = ocrdma_get_irq(dev, eq);
 	synchronize_irq(irq);
 	ocrdma_flush_cq(cq);
 
-	(void)ocrdma_mbx_destroy_cq(dev, cq);
+	ocrdma_mbx_destroy_cq(dev, cq);
 	if (cq->ucontext) {
 		pdid = cq->ucontext->cntxt_pd->id;
 		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1098,7 +1097,6 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	}
 
 	kfree(cq);
-	return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -75,7 +75,7 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
 			       struct ib_udata *udata);
 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 
 struct ib_qp *ocrdma_create_qp(struct ib_pd *,
 			       struct ib_qp_init_attr *attrs,
@@ -955,14 +955,13 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
 #define QEDR_DESTROY_CQ_MAX_ITERATIONS		(10)
 #define QEDR_DESTROY_CQ_ITER_DURATION		(10)
 
-int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibcq->device);
 	struct qed_rdma_destroy_cq_out_params oparams;
 	struct qed_rdma_destroy_cq_in_params iparams;
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
 	int iter;
-	int rc;
 
 	DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
 
@@ -973,10 +972,7 @@ int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		goto done;
 
 	iparams.icid = cq->icid;
-	rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
-	if (rc)
-		return rc;
-
+	dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
 	dev->ops->common->chain_free(dev->cdev, &cq->pbl);
 
 	if (udata) {
@@ -1007,9 +1003,6 @@ int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		iter--;
 	}
 
-	if (oparams.num_cq_notif != cq->cnq_notif)
-		goto err;
-
 	/* Note that we don't need to have explicit code to wait for the
 	 * completion of the event handler because it is invoked from the EQ.
 	 * Since the destroy CQ ramrod has also been received on the EQ we can
@@ -1019,15 +1012,6 @@ done:
 	cq->sig = ~cq->sig;
 
 	kfree(cq);
-
-	return 0;
-
-err:
-	DP_ERR(dev,
-	       "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
-	       cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
-
-	return -EINVAL;
 }
 
 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
@@ -54,7 +54,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
 			     const struct ib_cq_init_attr *attr,
 			     struct ib_udata *udata);
 int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
-int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *);
@@ -604,11 +604,9 @@ struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 	return cq;
 }
 
-int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	usnic_dbg("\n");
 	kfree(cq);
-	return 0;
 }
 
 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
@@ -61,7 +61,7 @@ int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
 				 const struct ib_cq_init_attr *attr,
 				 struct ib_udata *udata);
-int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
 			      u64 virt_addr, int access_flags,
 			      struct ib_udata *udata);
@@ -246,10 +246,8 @@ static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
  * pvrdma_destroy_cq - destroy completion queue
  * @cq: the completion queue to destroy.
  * @udata: user data or null for kernel object
- *
- * @return: 0 for success.
  */
-int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 {
 	struct pvrdma_cq *vcq = to_vcq(cq);
 	union pvrdma_cmd_req req;
@@ -275,8 +273,6 @@ int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
 
 	pvrdma_free_cq(dev, vcq);
 	atomic_dec(&dev->num_cqs);
-
-	return ret;
 }
 
 static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
@@ -412,7 +412,7 @@ int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 			       const struct ib_cq_init_attr *attr,
 			       struct ib_udata *udata);
-int pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
 int pvrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
 int pvrdma_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
@@ -300,10 +300,8 @@ done:
  * @udata: user data or NULL for kernel object
  *
  * Called by ib_destroy_cq() in the generic verbs code.
- *
- * Return: always 0
  */
-int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;
@@ -317,8 +315,6 @@ int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	else
 		vfree(cq->queue);
 	kfree(cq);
-
-	return 0;
 }
 
 /**
@@ -54,7 +54,7 @@
 struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 			    const struct ib_cq_init_attr *attr,
 			    struct ib_udata *udata);
-int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
@@ -819,14 +819,13 @@ err1:
 	return ERR_PTR(err);
 }
 
-static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
 
 	rxe_cq_disable(cq);
 
 	rxe_drop_ref(cq);
-	return 0;
 }
 
 static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
@@ -2462,7 +2462,7 @@ struct ib_device_ops {
 				   const struct ib_cq_init_attr *attr,
 				   struct ib_udata *udata);
 	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
-	int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
+	void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
 	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
 	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
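Since .destroy_cq is dispatched through struct ib_device_ops, each provider registers its now-void implementation in its ops table, and the core calls it through the function pointer shown above. An illustrative registration for the same hypothetical foo driver:

	static const struct ib_device_ops foo_dev_ops = {
		.create_cq = foo_create_cq,
		.destroy_cq = foo_destroy_cq,	/* void since this commit */
		.poll_cq = foo_poll_cq,
	};

	/* at device registration time */
	ib_set_device_ops(&foo_dev->ibdev, &foo_dev_ops);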