SUNRPC: Make slot allocation more reliable
In xprt_alloc_slot(), the spin lock is only needed to provide atomicity between the atomic_add_unless() failure and the call to xprt_add_backlog(). We do not actually need to hold it across the memory allocation itself. By dropping the lock, we can use a more resilient GFP_NOFS allocation, just as we now do in the rest of the RPC client code.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 92ea011f7c
parent 818a8dbe83
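For readers less familiar with the pattern the patch applies, here is a minimal userspace sketch of the same idea: reserve a slot, drop the lock across a potentially blocking allocation, re-take the lock, and undo the reservation on failure. It uses a pthread mutex in place of xprt->reserve_lock and calloc() in place of kzalloc(GFP_NOFS); the slot_table/alloc_slot names are illustrative only, not kernel or SUNRPC APIs.

#include <pthread.h>
#include <stdlib.h>

struct slot {
        int id;
};

struct slot_table {
        pthread_mutex_t lock;   /* plays the role of xprt->reserve_lock */
        unsigned int num_reqs;  /* plays the role of xprt->num_reqs */
        unsigned int max_reqs;
};

/* Called with tbl->lock held, like xprt_dynamic_alloc_slot(). */
static struct slot *alloc_slot(struct slot_table *tbl)
{
        struct slot *s;

        /* Reserve a slot count before dropping the lock. */
        if (tbl->num_reqs >= tbl->max_reqs)
                return NULL;
        tbl->num_reqs++;

        /*
         * The allocation may block, and it touches nothing the lock
         * protects, so the lock can be dropped around it.
         */
        pthread_mutex_unlock(&tbl->lock);
        s = calloc(1, sizeof(*s));
        pthread_mutex_lock(&tbl->lock);

        if (s == NULL)
                tbl->num_reqs--;   /* give the reservation back on failure */
        return s;
}

In the patch itself the counter is an atomic_t updated with atomic_add_unless()/atomic_dec(), so the reservation remains visible to other tasks while reserve_lock is dropped around the GFP_NOFS allocation, as the diff below shows.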
@@ -1047,13 +1047,15 @@ out:
         return ret;
 }
 
-static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
+static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
 {
         struct rpc_rqst *req = ERR_PTR(-EAGAIN);
 
         if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
                 goto out;
-        req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
+        spin_unlock(&xprt->reserve_lock);
+        req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
+        spin_lock(&xprt->reserve_lock);
         if (req != NULL)
                 goto out;
         atomic_dec(&xprt->num_reqs);
@@ -1081,7 +1083,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
                 list_del(&req->rq_list);
                 goto out_init_req;
         }
-        req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
+        req = xprt_dynamic_alloc_slot(xprt);
         if (!IS_ERR(req))
                 goto out_init_req;
         switch (PTR_ERR(req)) {