From: Chuck Lever <chuck.lever@xxxxxxxxxx>

Consumers of sp_lock now all run in process context. We can avoid
the overhead of disabling bottom halves when taking this lock.

Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 net/sunrpc/svc_xprt.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ecbccf0d89b9..8ced7591ce07 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -472,9 +472,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server);
 
 	percpu_counter_inc(&pool->sp_sockets_queued);
-	spin_lock_bh(&pool->sp_lock);
+	spin_lock(&pool->sp_lock);
 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-	spin_unlock_bh(&pool->sp_lock);
+	spin_unlock(&pool->sp_lock);
 
 	rqstp = svc_pool_wake_idle_thread(xprt->xpt_server, pool);
 	if (!rqstp) {
@@ -496,14 +496,14 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 	if (list_empty(&pool->sp_sockets))
 		goto out;
 
-	spin_lock_bh(&pool->sp_lock);
+	spin_lock(&pool->sp_lock);
 	if (likely(!list_empty(&pool->sp_sockets))) {
 		xprt = list_first_entry(&pool->sp_sockets,
 					struct svc_xprt, xpt_ready);
 		list_del_init(&xprt->xpt_ready);
 		svc_xprt_get(xprt);
 	}
-	spin_unlock_bh(&pool->sp_lock);
+	spin_unlock(&pool->sp_lock);
 out:
 	return xprt;
 }
@@ -1116,15 +1116,15 @@ static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 	for (i = 0; i < serv->sv_nrpools; i++) {
 		pool = &serv->sv_pools[i];
 
-		spin_lock_bh(&pool->sp_lock);
+		spin_lock(&pool->sp_lock);
 		list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
 			if (xprt->xpt_net != net)
 				continue;
 			list_del_init(&xprt->xpt_ready);
-			spin_unlock_bh(&pool->sp_lock);
+			spin_unlock(&pool->sp_lock);
			return xprt;
		}
-		spin_unlock_bh(&pool->sp_lock);
+		spin_unlock(&pool->sp_lock);
	}
	return NULL;
 }
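
For context on the locking change: spin_lock_bh() additionally disables
softirq processing on the local CPU, which is required only when the lock
can also be taken from bottom-half context; without it, a softirq
preempting the critical section on the same CPU could self-deadlock on the
lock. Since every sp_lock consumer now runs in process context, plain
spin_lock() is sufficient. Below is a minimal kernel-style sketch of the
before/after pattern; it is illustrative only, and demo_lock, demo_list,
and struct demo_item are hypothetical names, not part of this patch.

/* Illustrative sketch only -- demo_lock/demo_list are hypothetical. */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head link;
};

/*
 * If demo_lock could also be acquired in softirq context, every
 * process-context acquirer would need spin_lock_bh() so that a
 * softirq cannot interrupt the critical section on this CPU and
 * spin forever on the already-held lock.
 */
static void demo_enqueue_bh_safe(struct demo_item *item)
{
	spin_lock_bh(&demo_lock);	/* also disables local bottom halves */
	list_add_tail(&item->link, &demo_list);
	spin_unlock_bh(&demo_lock);
}

/*
 * Once every acquirer runs in process context, the bh-disable is
 * pure overhead; a plain spin_lock() is sufficient and cheaper.
 */
static void demo_enqueue_process_ctx(struct demo_item *item)
{
	spin_lock(&demo_lock);
	list_add_tail(&item->link, &demo_list);
	spin_unlock(&demo_lock);
}

The work saved is the local_bh_disable()/local_bh_enable() pair on every
acquire/release, which is the overhead the patch description refers to.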