#syz test: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git v6.13-rc1

netfs: Fix enomem handling in buffered reads

If netfs_read_to_pagecache() gets an error from either ->prepare_read()
or from netfs_prepare_read_iterator(), it needs to decrement
->nr_outstanding, cancel the subrequest and break out of the issuing
loop.  Currently, it only does this for two of the cases, but there are
two more that aren't handled.

Fix this by moving the handling to a common place and jumping to it from
all four places.  Note that a failure in netfs_prepare_read_iterator()
must overwrite ret with the error in slice, whereas a failure in
->prepare_read() must not, so two chained labels are used.  This is in
preference to inserting a wrapper around netfs_prepare_read_iterator()
as proposed by Dmitry Antipov[1].

Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
Reported-by: syzbot+404b4b745080b6210c6c@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://syzkaller.appspot.com/bug?extid=404b4b745080b6210c6c
Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: Dmitry Antipov <dmantipov@xxxxxxxxx>
cc: Jeff Layton <jlayton@xxxxxxxxxx>
cc: netfs@xxxxxxxxxxxxxxx
cc: linux-fsdevel@xxxxxxxxxxxxxxx
Link: https://lore.kernel.org/r/20241202093943.227786-1-dmantipov@xxxxxxxxx/ [1]
---
 fs/netfs/buffered_read.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 7ac34550c403..e5c7dd5a4c90 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -275,22 +275,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			netfs_stat(&netfs_n_rh_download);
 			if (rreq->netfs_ops->prepare_read) {
 				ret = rreq->netfs_ops->prepare_read(subreq);
-				if (ret < 0) {
-					atomic_dec(&rreq->nr_outstanding);
-					netfs_put_subrequest(subreq, false,
-							     netfs_sreq_trace_put_cancel);
-					break;
-				}
+				if (ret < 0)
+					goto prep_failed;
 				trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
 			}
 
 			slice = netfs_prepare_read_iterator(subreq);
-			if (slice < 0) {
-				atomic_dec(&rreq->nr_outstanding);
-				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
-				ret = slice;
-				break;
-			}
+			if (slice < 0)
+				goto prep_iter_failed;
 
 			rreq->netfs_ops->issue_read(subreq);
 			goto done;
@@ -302,6 +294,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			netfs_stat(&netfs_n_rh_zero);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 			netfs_read_subreq_terminated(subreq, 0, false);
 			goto done;
@@ -310,6 +304,8 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		if (source == NETFS_READ_FROM_CACHE) {
 			trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
 			slice = netfs_prepare_read_iterator(subreq);
+			if (slice < 0)
+				goto prep_iter_failed;
 			netfs_read_cache_to_pagecache(rreq, subreq);
 			goto done;
 		}
@@ -318,6 +314,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		WARN_ON_ONCE(1);
 		break;
 
+	prep_iter_failed:
+		ret = slice;
+	prep_failed:
+		subreq->error = ret;
+		atomic_dec(&rreq->nr_outstanding);
+		netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel);
+		break;
+
 	done:
 		size -= slice;
 		start += slice;
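
For anyone unfamiliar with the consolidated-cleanup pattern the patch
switches to, below is a minimal, self-contained sketch of the same idea
in plain userspace C.  None of the names are the real netfs API; the
types and helpers are illustrative stand-ins.  Each failure site jumps
to a shared label that undoes the per-iteration accounting once, rather
than duplicating the unwind code at every site.

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins -- not the real netfs types or helpers. */
	struct subreq {
		int error;
	};

	static int nr_outstanding;	/* models rreq->nr_outstanding */

	static struct subreq *alloc_subreq(void)
	{
		nr_outstanding++;	/* account for the new subrequest */
		return calloc(1, sizeof(struct subreq));
	}

	static void put_subreq(struct subreq *s)
	{
		free(s);		/* models netfs_put_subrequest() */
	}

	/* Fake preparation steps; a negative return means failure. */
	static int prepare(struct subreq *s, int i)
	{
		(void)s;
		return i == 2 ? -12 : 0;	/* fail the third pass, ENOMEM-style */
	}

	static long prepare_iter(struct subreq *s)
	{
		(void)s;
		return 4096;			/* bytes this subrequest will cover */
	}

	int main(void)
	{
		int ret = 0;

		for (int i = 0; i < 4; i++) {
			struct subreq *s = alloc_subreq();
			long slice;

			ret = prepare(s, i);
			if (ret < 0)
				goto prep_failed;

			slice = prepare_iter(s);
			if (slice < 0)
				goto prep_iter_failed;

			printf("issued subreq %d, slice=%ld\n", i, slice);
			nr_outstanding--;
			put_subreq(s);
			continue;

		prep_iter_failed:
			ret = slice;		/* propagate the iterator error... */
		prep_failed:
			s->error = ret;		/* ...or keep the prepare error as-is */
			nr_outstanding--;	/* undo the accounting */
			put_subreq(s);		/* drop the reference */
			break;			/* stop issuing further subrequests */
		}

		printf("done: ret=%d, outstanding=%d\n", ret, nr_outstanding);
		return 0;
	}

Built with "cc -Wall sketch.c", this issues two subrequests, fails the
third with -12 and leaves the loop with the counter back at zero.  The
two chained labels mirror the patch's design choice: without the
separate prep_iter_failed label, a failure in prepare() would have its
error code overwritten by a stale slice value.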