From 92ea011f7cbade821ebd56a1f70d20331c0320c8 Mon Sep 17 00:00:00 2001
From: Trond Myklebust
Date: Tue, 20 Jun 2017 19:35:39 -0400
Subject: [PATCH] SUNRPC: Make slot allocation more reliable

In xprt_alloc_slot(), the spin lock is only needed to provide atomicity
between the atomic_add_unless() failure and the call to
xprt_add_backlog(). We do not actually need to hold it across the
memory allocation itself.

By dropping the lock, we can use a more resilient GFP_NOFS allocation,
just as we now do in the rest of the RPC client code.

Signed-off-by: Trond Myklebust
Signed-off-by: Anna Schumaker
---
 net/sunrpc/xprt.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3e63c5e97ebe..4654a9934269 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1047,13 +1047,15 @@ out:
 	return ret;
 }
 
-static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
+static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
 {
 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
 
 	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
 		goto out;
-	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
+	spin_unlock(&xprt->reserve_lock);
+	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
+	spin_lock(&xprt->reserve_lock);
 	if (req != NULL)
 		goto out;
 	atomic_dec(&xprt->num_reqs);
@@ -1081,7 +1083,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 		list_del(&req->rq_list);
 		goto out_init_req;
 	}
-	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
+	req = xprt_dynamic_alloc_slot(xprt);
 	if (!IS_ERR(req))
 		goto out_init_req;
 	switch (PTR_ERR(req)) {
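
For readers outside the kernel tree, here is a minimal userspace sketch of the
locking pattern the patch adopts: reserve capacity with an atomic counter, drop
the lock across the (potentially sleeping) allocation, then retake the lock and
roll the reservation back if the allocation failed. All names below (struct
pool, add_unless(), dynamic_alloc_slot()) are hypothetical stand-ins, not
SUNRPC code; only the shape of the critical section mirrors
xprt_dynamic_alloc_slot().

/*
 * Illustrative sketch only: pthread mutex and C11 atomics stand in for
 * the kernel's spinlock and atomic_t.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct pool {
	pthread_mutex_t reserve_lock;	/* stands in for xprt->reserve_lock */
	atomic_int num_reqs;		/* slots currently reserved */
	int max_reqs;			/* hard cap on slot count */
};

/*
 * Atomically bump *v unless it is already at max, like the kernel's
 * atomic_add_unless(). Returns 1 on success, 0 if the cap was reached.
 */
static int add_unless(atomic_int *v, int max)
{
	int cur = atomic_load(v);

	while (cur < max) {
		/* On failure, cur is reloaded and the cap is re-checked. */
		if (atomic_compare_exchange_weak(v, &cur, cur + 1))
			return 1;
	}
	return 0;
}

/* Called with pool->reserve_lock held; returns with it held. */
static void *dynamic_alloc_slot(struct pool *p, size_t size)
{
	void *req;

	if (!add_unless(&p->num_reqs, p->max_reqs))
		return NULL;	/* at capacity: caller queues on the backlog */

	/*
	 * The counter already holds our reservation, so nothing protected
	 * by the lock depends on the allocation; it is safe to sleep here.
	 */
	pthread_mutex_unlock(&p->reserve_lock);
	req = calloc(1, size);
	pthread_mutex_lock(&p->reserve_lock);

	if (req == NULL)
		atomic_fetch_sub(&p->num_reqs, 1);	/* undo reservation */
	return req;
}

The design point carried over from the patch is that correctness never
depended on holding reserve_lock across the allocation: the atomic counter is
what reserves the slot, and the lock only needs to be retaken for the failure
path, where the caller checks the error and queues the task on the backlog.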