4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>

commit f57dcf4c72113c745d83f1c65f7291299f65c14f upstream.

When we fail to add the request to the I/O queue, we currently leave it
to the caller to free the failed request. However, since some of the
requests that fail are actually created by nfs_pageio_add_request()
itself, and are not passed back to the caller, this leads to a leakage
issue, which can again cause page locks to leak.

This commit addresses the leakage by freeing the created requests on
error, using desc->pg_completion_ops->error_cleanup().

Signed-off-by: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>
Fixes: a7d42ddb30997 ("nfs: add mirroring support to pgio layer")
Cc: stable@xxxxxxxxxxxxxxx # v4.0: c18b96a1b862: nfs: clean up rest of reqs
Cc: stable@xxxxxxxxxxxxxxx # v4.0: d600ad1f2bdb: NFS41: pop some layoutget
Cc: stable@xxxxxxxxxxxxxxx # v4.0+
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 fs/nfs/pagelist.c |   26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -975,6 +975,17 @@ static void nfs_pageio_doio(struct nfs_p
 	}
 }
 
+static void
+nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
+		struct nfs_page *req)
+{
+	LIST_HEAD(head);
+
+	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &head);
+	desc->pg_completion_ops->error_cleanup(&head);
+}
+
 /**
  * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
  * @desc: destination io descriptor
@@ -1012,10 +1023,8 @@ static int __nfs_pageio_add_request(stru
 			nfs_page_group_unlock(req);
 			desc->pg_moreio = 1;
 			nfs_pageio_doio(desc);
-			if (desc->pg_error < 0)
-				return 0;
-			if (mirror->pg_recoalesce)
-				return 0;
+			if (desc->pg_error < 0 || mirror->pg_recoalesce)
+				goto out_cleanup_subreq;
 			/* retry add_request for this subreq */
 			nfs_page_group_lock(req, false);
 			continue;
@@ -1048,6 +1057,10 @@ err_ptr:
 	desc->pg_error = PTR_ERR(subreq);
 	nfs_page_group_unlock(req);
 	return 0;
+out_cleanup_subreq:
+	if (req != subreq)
+		nfs_pageio_cleanup_request(desc, subreq);
+	return 0;
 }
 
 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
@@ -1141,11 +1154,14 @@ int nfs_pageio_add_request(struct nfs_pa
 		if (nfs_pgio_has_mirroring(desc))
 			desc->pg_mirror_idx = midx;
 		if (!nfs_pageio_add_request_mirror(desc, dupreq))
-			goto out_failed;
+			goto out_cleanup_subreq;
 	}
 
 	return 1;
 
+out_cleanup_subreq:
+	if (req != dupreq)
+		nfs_pageio_cleanup_request(desc, dupreq);
 out_failed:
 	/*
 	 * We might have failed before sending any reqs over wire.