Just move the transport locking out of the spin lock protected area
altogether.

Signed-off-by: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>
---
 net/sunrpc/svc_xprt.c | 21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b334cf71be3a..7eff6938f814 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -346,18 +346,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	if (!svc_xprt_has_something_to_do(xprt))
 		return;
 
-	cpu = get_cpu();
-	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
-	spin_lock_bh(&pool->sp_lock);
-
-	if (!list_empty(&pool->sp_threads) &&
-	    !list_empty(&pool->sp_sockets))
-		printk(KERN_ERR
-			"svc_xprt_enqueue: "
-			"threads and transports both waiting??\n");
-
-	pool->sp_stats.packets++;
-
 	/* Mark transport as busy. It will remain in this state until
 	 * the provider calls svc_xprt_received. We update XPT_BUSY
 	 * atomically because it also guards against trying to enqueue
@@ -366,9 +354,15 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
 		/* Don't enqueue transport while already enqueued */
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
-		goto out_unlock;
+		return;
 	}
 
+	cpu = get_cpu();
+	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
+	spin_lock_bh(&pool->sp_lock);
+
+	pool->sp_stats.packets++;
+
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
 				   struct svc_rqst,
@@ -395,7 +389,6 @@ static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 		pool->sp_stats.sockets_queued++;
 	}
 
-out_unlock:
 	spin_unlock_bh(&pool->sp_lock);
 	put_cpu();
 }
--
1.9.3
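
For reference, here is a simplified sketch of svc_xprt_do_enqueue() as it
reads with this patch applied. It is not the verbatim kernel code: the
idle-thread handoff bookkeeping is condensed into comments and the XPT_BUSY
comment block is shortened.

static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
{
	struct svc_pool *pool;
	struct svc_rqst *rqstp;
	int cpu;

	if (!svc_xprt_has_something_to_do(xprt))
		return;

	/* XPT_BUSY alone serializes enqueue attempts; there is no need
	 * to hold pool->sp_lock while testing it. */
	if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_stats.packets++;
	if (!list_empty(&pool->sp_threads)) {
		/* An idle thread is waiting: hand the transport to it and
		 * wake it up (svc_xprt_get(), the rq_xprt assignment and
		 * the wake_up() call are elided here). */
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst, rq_list);
		pool->sp_stats.threads_woken++;
	} else {
		/* No idle threads: queue the transport for later pickup. */
		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
		pool->sp_stats.sockets_queued++;
	}
	spin_unlock_bh(&pool->sp_lock);
	put_cpu();
}

The net effect is that pool->sp_lock is only held while the pool lists and
statistics are actually being touched; the busy check and the CPU/pool
lookup now happen outside the critical section.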