forked from Minki/linux
xprtrdma: Handle non-SEND completions via a callout
Allow each memory registration mode to plug in a callout that handles the completion of a memory registration operation.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <Devesh.Sharma@Emulex.Com>
Tested-by: Meghana Cheripady <Meghana.Cheripady@Emulex.Com>
Tested-by: Veeresh U. Kokatnur <veereshuk@chelsio.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
This commit is contained in:
parent
3968cb5850
commit
e46ac34c3c
@ -117,6 +117,22 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
|
||||
rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
|
||||
}
|
||||
|
||||
/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
|
||||
static void
|
||||
frwr_sendcompletion(struct ib_wc *wc)
|
||||
{
|
||||
struct rpcrdma_mw *r;
|
||||
|
||||
if (likely(wc->status == IB_WC_SUCCESS))
|
||||
return;
|
||||
|
||||
/* WARNING: Only wr_id and status are reliable at this point */
|
||||
r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
|
||||
dprintk("RPC: %s: frmr %p (stale), status %d\n",
|
||||
__func__, r, wc->status);
|
||||
r->r.frmr.fr_state = FRMR_IS_STALE;
|
||||
}
|
||||
|
||||
static int
|
||||
frwr_op_init(struct rpcrdma_xprt *r_xprt)
|
||||
{
|
||||
@ -148,6 +164,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
|
||||
|
||||
list_add(&r->mw_list, &buf->rb_mws);
|
||||
list_add(&r->mw_all, &buf->rb_all);
|
||||
r->mw_sendcompletion = frwr_sendcompletion;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -186,7 +186,7 @@ static const char * const wc_status[] = {
|
||||
"remote access error",
|
||||
"remote operation error",
|
||||
"transport retry counter exceeded",
|
||||
"RNR retrycounter exceeded",
|
||||
"RNR retry counter exceeded",
|
||||
"local RDD violation error",
|
||||
"remove invalid RD request",
|
||||
"operation aborted",
|
||||
@ -204,21 +204,17 @@ static const char * const wc_status[] = {
|
||||
static void
|
||||
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
|
||||
{
|
||||
if (likely(wc->status == IB_WC_SUCCESS))
|
||||
return;
|
||||
|
||||
/* WARNING: Only wr_id and status are reliable at this point */
|
||||
if (wc->wr_id == 0ULL) {
|
||||
if (wc->status != IB_WC_WR_FLUSH_ERR)
|
||||
if (wc->wr_id == RPCRDMA_IGNORE_COMPLETION) {
|
||||
if (wc->status != IB_WC_SUCCESS &&
|
||||
wc->status != IB_WC_WR_FLUSH_ERR)
|
||||
pr_err("RPC: %s: SEND: %s\n",
|
||||
__func__, COMPLETION_MSG(wc->status));
|
||||
} else {
|
||||
struct rpcrdma_mw *r;
|
||||
|
||||
r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
|
||||
r->r.frmr.fr_state = FRMR_IS_STALE;
|
||||
pr_err("RPC: %s: frmr %p (stale): %s\n",
|
||||
__func__, r, COMPLETION_MSG(wc->status));
|
||||
r->mw_sendcompletion(wc);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1622,7 +1618,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
|
||||
}
|
||||
|
||||
send_wr.next = NULL;
|
||||
send_wr.wr_id = 0ULL; /* no send cookie */
|
||||
send_wr.wr_id = RPCRDMA_IGNORE_COMPLETION;
|
||||
send_wr.sg_list = req->rl_send_iov;
|
||||
send_wr.num_sge = req->rl_niovs;
|
||||
send_wr.opcode = IB_WR_SEND;
|
||||
|
@ -106,6 +106,10 @@ struct rpcrdma_ep {
|
||||
#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
|
||||
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
|
||||
|
||||
/* Force completion handler to ignore the signal
|
||||
*/
|
||||
#define RPCRDMA_IGNORE_COMPLETION (0ULL)
|
||||
|
||||
/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
|
||||
*
|
||||
* The below structure appears at the front of a large region of kmalloc'd
|
||||
@ -206,6 +210,7 @@ struct rpcrdma_mw {
|
||||
struct ib_fmr *fmr;
|
||||
struct rpcrdma_frmr frmr;
|
||||
} r;
|
||||
void (*mw_sendcompletion)(struct ib_wc *);
|
||||
struct list_head mw_list;
|
||||
struct list_head mw_all;
|
||||
};
|
||||
|
Loading…
Reference in New Issue
Block a user