RDMA/iw_cxgb4: Low resource fixes for Completion queue
Pre-allocate the buffer needed to deallocate the completion queue, so that the completion queue can still be torn down during RDMA termination even when the system is running out of memory.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
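The change follows a standard low-memory-robustness pattern: any buffer that teardown needs is allocated at creation time, where an allocation failure can still be returned to the caller, and the destroy path then only consumes memory it already owns. Below is a minimal sketch of that pattern, not the driver's actual code; my_res, my_res_create, my_res_destroy and send_destroy_wr are hypothetical illustration names, while alloc_skb(), kfree_skb() and GFP_KERNEL are the real kernel APIs the patch uses.

	#include <linux/skbuff.h>

	/* Sketch of the pre-allocated-destroy pattern; hypothetical type. */
	struct my_res {
		struct sk_buff *destroy_skb;	/* reserved for the destroy request */
	};

	/* Stand-in for posting the destroy work request to hardware; here
	 * it simply consumes the skb. */
	static void send_destroy_wr(struct sk_buff *skb)
	{
		kfree_skb(skb);
	}

	static int my_res_create(struct my_res *res, unsigned int wr_len)
	{
		/* Reserve the teardown buffer up front, while -ENOMEM can
		 * still be reported to the caller. */
		res->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
		if (!res->destroy_skb)
			return -ENOMEM;
		return 0;
	}

	static void my_res_destroy(struct my_res *res)
	{
		/* Teardown consumes the pre-allocated skb, so it cannot
		 * fail under memory pressure. */
		send_destroy_wr(res->destroy_skb);
		res->destroy_skb = NULL;
	}

The cost is that the skb sits idle for the resource's entire lifetime; the commit accepts that small per-CQ overhead so that c4iw_destroy_cq() can make forward progress even when the system is out of memory.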
committed by Doug Ledford
parent 0f8ab0b6e9
commit dd6b024126
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
@@ -33,19 +33,15 @@
 #include "iw_cxgb4.h"
 
 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
-		      struct c4iw_dev_ucontext *uctx)
+		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
 {
 	struct fw_ri_res_wr *res_wr;
 	struct fw_ri_res *res;
 	int wr_len;
 	struct c4iw_wr_wait wr_wait;
-	struct sk_buff *skb;
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
 
 	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
@@ -863,7 +859,9 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
 				  : NULL;
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
-		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
+		   chp->destroy_skb);
+	chp->destroy_skb = NULL;
 	kfree(chp);
 	return 0;
 }
@@ -879,7 +877,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	struct c4iw_cq *chp;
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
-	int ret;
+	int ret, wr_len;
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
@@ -896,6 +894,13 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
 
+	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
+	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
+	if (!chp->destroy_skb) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
 	if (ib_context)
 		ucontext = to_c4iw_ucontext(ib_context);
 
@@ -936,7 +941,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	ret = create_cq(&rhp->rdev, &chp->cq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
 	if (ret)
-		goto err1;
+		goto err2;
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
@@ -947,15 +952,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	init_waitqueue_head(&chp->wait);
 	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
 	if (ret)
-		goto err2;
+		goto err3;
 
 	if (ucontext) {
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
-			goto err3;
+			goto err4;
 		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
 		if (!mm2)
-			goto err4;
+			goto err5;
 
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
@@ -970,7 +975,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		ret = ib_copy_to_udata(udata, &uresp,
 				       sizeof(uresp) - sizeof(uresp.reserved));
 		if (ret)
-			goto err5;
+			goto err6;
 
 		mm->key = uresp.key;
 		mm->addr = virt_to_phys(chp->cq.queue);
@@ -986,15 +991,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	     __func__, chp->cq.cqid, chp, chp->cq.size,
 	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
 	return &chp->ibcq;
-err5:
+err6:
 	kfree(mm2);
-err4:
+err5:
 	kfree(mm);
-err3:
+err4:
 	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
-err2:
+err3:
 	destroy_cq(&chp->rhp->rdev, &chp->cq,
-		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
+		   chp->destroy_skb);
+err2:
+	kfree_skb(chp->destroy_skb);
 err1:
 	kfree(chp);
 	return ERR_PTR(ret);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -414,6 +414,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
 struct c4iw_cq {
 	struct ib_cq ibcq;
 	struct c4iw_dev *rhp;
+	struct sk_buff *destroy_skb;
 	struct t4_cq cq;
 	spinlock_t lock;
 	spinlock_t comp_handler_lock;