rpcrdma_reps are dynamically allocated according to the server's
credit grant and the workload. rpcrdma_reqs can also be allocated on
demand: only RPCRDMA_MIN_SLOT_TABLE of them are created when the
transport is set up, and rpcrdma_buffer_get() allocates a fresh one
whenever the free list is empty. Keeping a number of these around
means the transport can handle minor bursts of RPC traffic easily.
The device-removal path now also wakes the next waiter on the
transport's backlog queue so it does not sleep indefinitely once the
device is gone.

Unlike TCP's dynamic slot allocator, these are not released by
->free_slot. This is because rpcrdma_reqs are large and have
DMA-mapped buffers associated with them, and thus are costly to set
up. Instead, if the system needs memory, a shrinker could steal a
few free rpcrdma_reqs from each transport. That can be added later
if there is a strong need.

Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 net/sunrpc/xprtrdma/transport.c |    1 +
 net/sunrpc/xprtrdma/verbs.c     |   12 ++++++++----
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 3cbc9b7..254cb8c 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -450,6 +450,7 @@
 		xprt_clear_connected(xprt);
 		rpcrdma_ia_remove(ia);
 		clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
+		rpc_wake_up_next(&xprt->backlog);
 		return;
 	}
 	if (ep->rep_connected == -ENODEV)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 6308e60..73d5247 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1171,7 +1171,7 @@ struct rpcrdma_req *
 	INIT_LIST_HEAD(&buf->rb_allreqs);
 
 	rc = -ENOMEM;
-	for (i = 0; i < buf->rb_max_requests; i++) {
+	for (i = 0; i < RPCRDMA_MIN_SLOT_TABLE; i++) {
 		struct rpcrdma_req *req;
 
 		req = rpcrdma_req_create(buf, GFP_KERNEL);
@@ -1360,9 +1360,13 @@ struct rpcrdma_req *
 	spin_lock(&buffers->rb_lock);
 	req = list_first_entry_or_null(&buffers->rb_send_bufs,
 				       struct rpcrdma_req, rl_list);
-	if (req)
-		list_del_init(&req->rl_list);
-	spin_unlock(&buffers->rb_lock);
+	if (req) {
+		list_del(&req->rl_list);
+		spin_unlock(&buffers->rb_lock);
+	} else {
+		spin_unlock(&buffers->rb_lock);
+		req = rpcrdma_req_create(buffers, GFP_NOFS);
+	}
 	return req;
 }
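
For anyone wondering what the shrinker mentioned above might look
like, here is a minimal sketch; it is not part of this patch. It
assumes the rpcrdma_req_destroy() helper and the
rb_lock/rb_send_bufs/rl_list names used in the hunks above; the
rpcrdma_shrinker_buf pointer and the rpcrdma_reqs_* names are
invented here for illustration (a real implementation would walk the
buffer pool of every transport). The shrinker API itself is the
standard one from <linux/shrinker.h>.

/* Hypothetical sketch, not part of this patch: reclaim a few free
 * rpcrdma_reqs under memory pressure.
 */
static struct rpcrdma_buffer *rpcrdma_shrinker_buf;

static unsigned long
rpcrdma_reqs_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct rpcrdma_buffer *buf = rpcrdma_shrinker_buf;
	struct rpcrdma_req *req;
	unsigned long count = 0;

	if (!buf)
		return 0;

	/* Report how many free reqs could be released; zero tells
	 * the VM to skip the scan callback entirely. */
	spin_lock(&buf->rb_lock);
	list_for_each_entry(req, &buf->rb_send_bufs, rl_list)
		count++;
	spin_unlock(&buf->rb_lock);
	return count;
}

static unsigned long
rpcrdma_reqs_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct rpcrdma_buffer *buf = rpcrdma_shrinker_buf;
	unsigned long freed = 0;

	if (!buf)
		return SHRINK_STOP;

	/* Steal free reqs one at a time and release their
	 * resources; stop as soon as the free list runs dry. */
	while (freed < sc->nr_to_scan) {
		struct rpcrdma_req *req;

		spin_lock(&buf->rb_lock);
		req = list_first_entry_or_null(&buf->rb_send_bufs,
					       struct rpcrdma_req, rl_list);
		if (req)
			list_del(&req->rl_list);
		spin_unlock(&buf->rb_lock);
		if (!req)
			break;
		rpcrdma_req_destroy(req);
		freed++;
	}
	return freed ? freed : SHRINK_STOP;
}

static struct shrinker rpcrdma_reqs_shrinker = {
	.count_objects	= rpcrdma_reqs_count,
	.scan_objects	= rpcrdma_reqs_scan,
	.seeks		= DEFAULT_SEEKS,
};

Registering it would be a single
register_shrinker(&rpcrdma_reqs_shrinker) call at module load time.
Note that rpcrdma_buffer_get() above already uses GFP_NOFS for its
on-demand allocation, so refilling the pool cannot recurse into
filesystem reclaim.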