In a subsequent patch, we'll be invoking rpcrdma_req_create() during
transport operation. To protect the walk of rb_allreqs in
rpcrdma_ia_remove, block creation of rpcrdma_reqs while a removal is
in progress.

Because the caller of ->close is holding the transport send lock,
there can be only one call at a time. We don't need the test_and_set
mechanism to prevent multiple callers to rpcrdma_ia_remove.

Comments are clarified to note how the data structures are protected.

Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 net/sunrpc/xprtrdma/transport.c |    3 ++-
 net/sunrpc/xprtrdma/verbs.c     |   18 ++++++++++++++----
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 2ba8be1..3cbc9b7 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -446,9 +446,10 @@
 
 	dprintk("RPC: %s: closing xprt %p\n", __func__, xprt);
 
-	if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
+	if (test_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
 		xprt_clear_connected(xprt);
 		rpcrdma_ia_remove(ia);
+		clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
 		return;
 	}
 	if (ep->rep_connected == -ENODEV)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 42a6ef6..6308e60 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -452,8 +452,7 @@
  * rpcrdma_ia_remove - Handle device driver unload
  * @ia: interface adapter being removed
  *
- * Divest transport H/W resources associated with this adapter,
- * but allow it to be restored later.
+ * Caller holds the transport send lock.
  */
 void
 rpcrdma_ia_remove(struct rpcrdma_ia *ia)
@@ -484,16 +483,23 @@
 		ib_free_cq(ep->rep_attr.send_cq);
 		ep->rep_attr.send_cq = NULL;
 
-		/* The ULP is responsible for ensuring all DMA
-		 * mappings and MRs are gone.
+		/* The ib_drain_qp above guarantees that all posted
+		 * Receives have flushed, which returns the transport's
+		 * rpcrdma_reps to the rb_recv_bufs list.
 		 */
 		list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
 			rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
+
+		/* DMA mapping happens in ->send_request with the
+		 * transport send lock held. Our caller is holding
+		 * the transport send lock.
+		 */
 		list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
 			rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
 			rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
 			rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
 		}
+
 		rpcrdma_mrs_destroy(buf);
 		ib_dealloc_pd(ia->ri_pd);
 		ia->ri_pd = NULL;
@@ -1071,9 +1077,13 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
 struct rpcrdma_req *
 rpcrdma_req_create(struct rpcrdma_buffer *buffer, gfp_t flags)
 {
+	struct rpcrdma_ia *ia = rdmab_to_ia(buffer);
	struct rpcrdma_regbuf *rb;
 	struct rpcrdma_req *req;
 
+	if (test_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags))
+		return NULL;
+
 	req = kzalloc(sizeof(*req), flags);
 	if (req == NULL)
 		return NULL;
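
For readers who are new to this exclusion pattern, the two files above
cooperate, so here is a stand-alone userspace sketch of the scheme the
hunks implement together. Every name in it (device_removal_event,
create_req, close_transport, send_lock) is hypothetical and merely
stands in for the kernel code named in the comments; it is an
illustration, not part of the patch.

	/* Stand-alone userspace analogue; all names are hypothetical. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t send_lock = PTHREAD_MUTEX_INITIALIZER;
	static atomic_bool removing;	/* plays RPCRDMA_IAF_REMOVING */

	/* Plays the CM upcall that flags a device removal. */
	static void device_removal_event(void)
	{
		atomic_store(&removing, true);
	}

	/* Plays rpcrdma_req_create(): back off while a removal is
	 * pending or in progress, so the walk of the all-reqs list
	 * stays safe.
	 */
	static void *create_req(void)
	{
		if (atomic_load(&removing))
			return NULL;
		return calloc(1, 64);	/* plays the req allocation */
	}

	/* Plays ->close: the caller holds the transport send lock, so
	 * only one thread can perform the removal and a plain test of
	 * the flag is enough. The flag is cleared only after removal
	 * completes, keeping create_req() gated for the whole walk.
	 */
	static void close_transport(void)
	{
		pthread_mutex_lock(&send_lock);
		if (atomic_load(&removing)) {
			/* ... walk the all-reqs list, unmap buffers ... */
			atomic_store(&removing, false);
		}
		pthread_mutex_unlock(&send_lock);
	}

	int main(void)
	{
		device_removal_event();
		void *req = create_req();
		printf("during removal: %s\n", req ? "created" : "refused");
		close_transport();
		req = create_req();
		printf("after removal:  %s\n", req ? "created" : "refused");
		free(req);
		return 0;
	}

The sketch also shows why test_and_clear_bit is replaced with a plain
test_bit: if the flag were cleared on entry to ->close, a concurrent
rpcrdma_req_create() could no longer observe it while rb_allreqs was
still being walked. Clearing it only after rpcrdma_ia_remove returns
keeps the gate closed for the entire removal.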