To avoid extra trips to the page allocator, don't free unused pages
in nfsd_splice_actor(), but instead place them in a local cache. That
cache is then used first when refilling rq_pages.

On workloads that perform large NFS READs on splice-capable file
systems, this saves a considerable amount of work.

Suggested-by: NeilBrown <neilb@xxxxxxx>
Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 fs/nfsd/vfs.c                   |  4 ++--
 include/linux/sunrpc/svc.h      |  1 +
 include/linux/sunrpc/svc_xprt.h | 28 ++++++++++++++++++++++++++++
 net/sunrpc/svc.c                |  7 +++++++
 net/sunrpc/svc_xprt.c           | 12 ++++++++++++
 5 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d316e11923c5..25cf41eaf3c4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -852,14 +852,14 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 
 	if (rqstp->rq_res.page_len == 0) {
 		get_page(page);
-		put_page(*rqstp->rq_next_page);
+		svc_rqst_put_unused_page(rqstp, *rqstp->rq_next_page);
 		*(rqstp->rq_next_page++) = page;
 		rqstp->rq_res.page_base = buf->offset;
 		rqstp->rq_res.page_len = size;
 	} else if (page != pp[-1]) {
 		get_page(page);
 		if (*rqstp->rq_next_page)
-			put_page(*rqstp->rq_next_page);
+			svc_rqst_put_unused_page(rqstp, *rqstp->rq_next_page);
 		*(rqstp->rq_next_page++) = page;
 		rqstp->rq_res.page_len += size;
 	} else
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 31ee3b6047c3..340f4f3989c0 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -250,6 +250,7 @@ struct svc_rqst {
 	struct xdr_stream	rq_arg_stream;
 	struct page		*rq_scratch_page;
 	struct xdr_buf		rq_res;
+	struct list_head	rq_unused_pages;
 	struct page		*rq_pages[RPCSVC_MAXPAGES + 1];
 	struct page *		*rq_respages;	/* points into rq_pages */
 	struct page *		*rq_next_page;	/* next reply page to use */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 571f605bc91e..49ef86499876 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -150,6 +150,34 @@ static inline void svc_xprt_get(struct svc_xprt *xprt)
 {
 	kref_get(&xprt->xpt_ref);
 }
+
+/**
+ * svc_rqst_get_unused_page - Tap a page from the local cache
+ * @rqstp: svc_rqst with cached unused pages
+ *
+ * To save an allocator round trip, pages can be added to a
+ * local cache and re-used later by svc_alloc_arg().
+ *
+ * Returns an unused page, or NULL if the cache is empty.
+ */
+static inline struct page *svc_rqst_get_unused_page(struct svc_rqst *rqstp)
+{
+	return list_first_entry_or_null(&rqstp->rq_unused_pages,
+					struct page, lru);
+}
+
+/**
+ * svc_rqst_put_unused_page - Stash a page in the local cache
+ * @rqstp: svc_rqst with cached unused pages
+ * @page: page to cache
+ *
+ */
+static inline void svc_rqst_put_unused_page(struct svc_rqst *rqstp,
+					    struct page *page)
+{
+	list_add(&page->lru, &rqstp->rq_unused_pages);
+}
+
 static inline void svc_xprt_set_local(struct svc_xprt *xprt,
 				      const struct sockaddr *sa,
 				      const size_t salen)
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 61fb8a18552c..3920fa8f1146 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -570,6 +570,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 	if (svc_is_backchannel(rqstp))
 		return 1;
 
+	INIT_LIST_HEAD(&rqstp->rq_unused_pages);
+
 	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
 				       * We assume one is at most one page
 				       */
@@ -593,8 +595,13 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
 static void
 svc_release_buffer(struct svc_rqst *rqstp)
 {
+	struct page *page;
 	unsigned int i;
 
+	while ((page = svc_rqst_get_unused_page(rqstp))) {
+		list_del(&page->lru);
+		put_page(page);
+	}
 	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
 		if (rqstp->rq_pages[i])
 			put_page(rqstp->rq_pages[i]);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 15aacfa5ca21..84210e546a66 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -678,6 +678,18 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	for (needed = 0, i = 0; i < pages ; i++)
 		if (!rqstp->rq_pages[i])
 			needed++;
+	if (needed) {
+		for (i = 0; i < pages; i++) {
+			if (!rqstp->rq_pages[i]) {
+				page = svc_rqst_get_unused_page(rqstp);
+				if (!page)
+					break;
+				list_del(&page->lru);
+				rqstp->rq_pages[i] = page;
+				needed--;
+			}
+		}
+	}
 	if (needed) {
 		LIST_HEAD(list);
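As an aside for readers less familiar with the pattern, here is a
minimal user-space sketch of the same idea, assuming nothing from the
kernel tree: instead of freeing a buffer that is about to be displaced,
stash it on a per-request free list and satisfy later refills from that
list before falling back to the allocator. The names below
(struct unused_cache, cache_put(), cache_get(), refill()) are invented
for this sketch only; the patch itself keeps struct page entries on
rq_unused_pages using the list_head/lru machinery.

/*
 * Illustrative model of the rq_unused_pages cache; not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;	/* singly linked free list */
	char data[4096];	/* stands in for one page of memory */
};

struct unused_cache {
	struct buf *head;	/* most recently stashed buffer */
};

/* Stash a displaced buffer instead of freeing it
 * (user-space analogue of svc_rqst_put_unused_page). */
static void cache_put(struct unused_cache *c, struct buf *b)
{
	b->next = c->head;
	c->head = b;
}

/* Take a stashed buffer if one is available
 * (user-space analogue of svc_rqst_get_unused_page + list_del). */
static struct buf *cache_get(struct unused_cache *c)
{
	struct buf *b = c->head;

	if (b)
		c->head = b->next;
	return b;
}

/* Refill empty slots, preferring the cache over malloc();
 * returns how many trips to the allocator were needed. */
static int refill(struct unused_cache *c, struct buf **slots, int n)
{
	int i, allocated = 0;

	for (i = 0; i < n; i++) {
		if (slots[i])
			continue;
		slots[i] = cache_get(c);
		if (!slots[i]) {
			slots[i] = malloc(sizeof(struct buf));
			if (!slots[i])
				return -1;
			allocated++;
		}
	}
	return allocated;
}

int main(void)
{
	struct unused_cache cache = { NULL };
	struct buf *slots[4] = { NULL };
	int i;

	/* First fill: every slot comes from the allocator. */
	printf("first refill allocated %d buffers\n",
	       refill(&cache, slots, 4));

	/* Pretend two buffers were displaced by spliced pages: stash them. */
	cache_put(&cache, slots[0]);
	cache_put(&cache, slots[1]);
	slots[0] = slots[1] = NULL;

	/* Second fill: the stashed buffers are reused, no allocation. */
	printf("second refill allocated %d buffers\n",
	       refill(&cache, slots, 4));

	for (i = 0; i < 4; i++)
		free(slots[i]);
	return 0;
}

This prints "allocated 4" for the first refill and "allocated 0" for the
second, which is the effect the patch aims for in svc_alloc_arg(): pages
displaced by nfsd_splice_actor() come back out of rq_unused_pages instead
of requiring another trip to the page allocator.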