xprtrdma: De-duplicate "allocate new, free old regbuf"

Clean up by providing an API to do this common task.

At this point, the difference between rpcrdma_get_sendbuf and
rpcrdma_get_recvbuf has become tiny. These can be collapsed into a
single helper.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Chuck Lever 2019-04-24 09:39:27 -04:00 committed by Anna Schumaker
parent bb93a1ae2b
commit 0f665ceb71
3 changed files with 39 additions and 47 deletions
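The net effect: two direction-specific helpers collapse into a single size check that reallocates only when the existing regbuf is too small, and leaves the buffer untouched when allocation fails. As a rough standalone sketch of that pattern — simplified stand-in types, not the kernel's own structures (the real code is in the diffs below):

#include <stdbool.h>
#include <stdlib.h>

/* Simplified stand-in for struct rpcrdma_regbuf: just a sized allocation. */
struct regbuf {
	void   *data;
	size_t  len;
};

/* "Allocate new, free old": on failure the old buffer is left untouched,
 * mirroring the contract documented for rpcrdma_regbuf_realloc(). */
static bool regbuf_realloc(struct regbuf *rb, size_t size)
{
	void *buf = malloc(size);

	if (!buf)
		return false;
	free(rb->data);
	rb->data = buf;
	rb->len = size;
	return true;
}

/* The de-duplicated check: one helper now serves both the send and
 * receive paths, reallocating only when the buffer is too small. */
static bool check_regbuf(struct regbuf *rb, size_t size)
{
	if (rb->len < size)
		return regbuf_realloc(rb, size);
	return true;
}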

net/sunrpc/xprtrdma/transport.c

@@ -585,52 +585,15 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst)
 	rpc_wake_up_next(&xprt->backlog);
 }
 
-static bool
-rpcrdma_get_sendbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-		    size_t size, gfp_t flags)
+static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt,
+				 struct rpcrdma_regbuf *rb, size_t size,
+				 gfp_t flags)
 {
-	struct rpcrdma_regbuf *rb;
-
-	if (likely(rdmab_length(req->rl_sendbuf) >= size))
-		return true;
-
-	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, flags);
-	if (!rb)
-		return false;
-
-	rpcrdma_free_regbuf(req->rl_sendbuf);
-	r_xprt->rx_stats.hardway_register_count += size;
-	req->rl_sendbuf = rb;
-	return true;
-}
-
-/* The rq_rcv_buf is used only if a Reply chunk is necessary.
- * The decision to use a Reply chunk is made later in
- * rpcrdma_marshal_req. This buffer is registered at that time.
- *
- * Otherwise, the associated RPC Reply arrives in a separate
- * Receive buffer, arbitrarily chosen by the HCA. The buffer
- * allocated here for the RPC Reply is not utilized in that
- * case. See rpcrdma_inline_fixup.
- *
- * A regbuf is used here to remember the buffer size.
- */
-static bool
-rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
-		    size_t size, gfp_t flags)
-{
-	struct rpcrdma_regbuf *rb;
-
-	if (likely(rdmab_length(req->rl_recvbuf) >= size))
-		return true;
-
-	rb = rpcrdma_alloc_regbuf(size, DMA_NONE, flags);
-	if (!rb)
-		return false;
-
-	rpcrdma_free_regbuf(req->rl_recvbuf);
-	r_xprt->rx_stats.hardway_register_count += size;
-	req->rl_recvbuf = rb;
+	if (unlikely(rdmab_length(rb) < size)) {
+		if (!rpcrdma_regbuf_realloc(rb, size, flags))
+			return false;
+		r_xprt->rx_stats.hardway_register_count += size;
+	}
 	return true;
 }
 
@@ -655,9 +618,11 @@ xprt_rdma_allocate(struct rpc_task *task)
 	if (RPC_IS_SWAPPER(task))
 		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
 
-	if (!rpcrdma_get_sendbuf(r_xprt, req, rqst->rq_callsize, flags))
+	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
+				  flags))
 		goto out_fail;
-	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
+	if (!rpcrdma_check_regbuf(r_xprt, req->rl_recvbuf, rqst->rq_rcvsize,
+				  flags))
 		goto out_fail;
 
 	rqst->rq_buffer = rdmab_data(req->rl_sendbuf);

net/sunrpc/xprtrdma/verbs.c

@@ -1399,6 +1399,31 @@ rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
 	return rb;
 }
 
+/**
+ * rpcrdma_regbuf_realloc - re-allocate a SEND/RECV buffer
+ * @rb: regbuf to reallocate
+ * @size: size of buffer to be allocated, in bytes
+ * @flags: GFP flags
+ *
+ * Returns true if reallocation was successful. If false is
+ * returned, @rb is left untouched.
+ */
+bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags)
+{
+	void *buf;
+
+	buf = kmalloc(size, flags);
+	if (!buf)
+		return false;
+
+	rpcrdma_dma_unmap_regbuf(rb);
+	kfree(rb->rg_data);
+
+	rb->rg_data = buf;
+	rb->rg_iov.length = size;
+	return true;
+}
+
 /**
  * __rpcrdma_map_regbuf - DMA-map a regbuf
  * @ia: controlling rpcrdma_ia

net/sunrpc/xprtrdma/xprt_rdma.h

@@ -552,6 +552,8 @@ void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
 
 struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
 					    gfp_t);
+bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size,
+			    gfp_t flags);
 bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
 void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);