From: Chuck Lever <chuck.lever@xxxxxxxxxx>

Reduce the rate at which nfsd threads hammer on the page allocator.
This improves throughput scalability by enabling the threads to run
more independently of each other.

[mgorman: Update interpretation of alloc_pages_bulk return value]

Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
---
 net/sunrpc/svc_xprt.c | 31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 609bda97d4ae..0c27c3291ca1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -643,30 +643,29 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
 	struct xdr_buf *arg = &rqstp->rq_arg;
-	int pages;
-	int i;
+	unsigned long pages, filled;
 
-	/* now allocate needed pages. If we get a failure, sleep briefly */
 	pages = (serv->sv_max_mesg + 2 * PAGE_SIZE) >> PAGE_SHIFT;
 	if (pages > RPCSVC_MAXPAGES) {
-		pr_warn_once("svc: warning: pages=%u > RPCSVC_MAXPAGES=%lu\n",
+		pr_warn_once("svc: warning: pages=%lu > RPCSVC_MAXPAGES=%lu\n",
 			     pages, RPCSVC_MAXPAGES);
 		/* use as many pages as possible */
 		pages = RPCSVC_MAXPAGES;
 	}
-	for (i = 0; i < pages ; i++)
-		while (rqstp->rq_pages[i] == NULL) {
-			struct page *p = alloc_page(GFP_KERNEL);
-			if (!p) {
-				set_current_state(TASK_INTERRUPTIBLE);
-				if (signalled() || kthread_should_stop()) {
-					set_current_state(TASK_RUNNING);
-					return -EINTR;
-				}
-				schedule_timeout(msecs_to_jiffies(500));
-			}
-			rqstp->rq_pages[i] = p;
+
+	for (;;) {
+		filled = alloc_pages_bulk_array(GFP_KERNEL, pages,
+						rqstp->rq_pages);
+		if (filled == pages)
+			break;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signalled() || kthread_should_stop()) {
+			set_current_state(TASK_RUNNING);
+			return -EINTR;
 		}
+		schedule_timeout(msecs_to_jiffies(500));
+	}
 	rqstp->rq_page_end = &rqstp->rq_pages[pages];
 	rqstp->rq_pages[pages] = NULL; /* this might be seen in nfsd_splice_actor() */
 
-- 
2.26.2
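
For background on the allocator contract the new loop depends on: in
the bulk allocator series this patch sits on, alloc_pages_bulk_array()
skips array slots that already hold a page and returns the total
number of populated entries, so pages obtained during an earlier,
partially successful pass are kept and counted on the next attempt.
Comparing the return value against the requested count is therefore
enough to detect a fully populated array. Below is a minimal sketch of
that retry pattern under those assumptions, not code from this patch;
fill_page_array() and its parameters are illustrative names:

	/*
	 * Illustrative only: retry a bulk fill until every slot in the
	 * array holds a page. Assumes <linux/gfp.h>, <linux/sched.h>,
	 * <linux/sched/signal.h> and <linux/jiffies.h>.
	 */
	static int fill_page_array(struct page **pages, unsigned long count)
	{
		unsigned long filled;

		for (;;) {
			/*
			 * NULL slots are populated; slots that already
			 * hold a page are skipped but still counted in
			 * the return value.
			 */
			filled = alloc_pages_bulk_array(GFP_KERNEL, count,
							pages);
			if (filled == count)
				return 0;
			if (signal_pending(current))
				return -EINTR;
			/* Partial fill: back off briefly, then retry. */
			schedule_timeout_interruptible(msecs_to_jiffies(500));
		}
	}

Note that svc_alloc_arg() above additionally checks
kthread_should_stop(), since nfsd threads are kthreads; the sketch
keeps only the signal check for brevity.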