From: David Howells <dhowells@xxxxxxxxxx>

[ Upstream commit 5e51c627c5acbcf82bb552e17533a79d2a6a2600 ]

Fix the dodgy maths in netfs_rreq_unlock_folios().  start_page could be
inside the folio, in which case the calculation of pgpos will come up
with a negative number (though for the moment rreq->start is rounded
down earlier and folios would have to get merged whilst locked).

Alter how this works to just frame the tracking in terms of absolute
file positions, rather than offsets from the start of the I/O request.
This simplifies the maths and makes it easier to follow.

Fix the issue by using folio_pos() and folio_size() to calculate the
end position of the page.

Fixes: 3d3c95046742 ("netfs: Provide readahead and readpage netfs helpers")
Reported-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
Reviewed-by: Jeff Layton <jlayton@xxxxxxxxxx>
Reviewed-by: Jingbo Xu <jefflexu@xxxxxxxxxxxxxxxxx>
cc: linux-cachefs@xxxxxxxxxx
cc: linux-fsdevel@xxxxxxxxxxxxxxx
Link: https://lore.kernel.org/r/Y2SJw7w1IsIik3nb@xxxxxxxxxxxxxxxxxxxx/
Link: https://lore.kernel.org/r/166757988611.950645.7626959069846893164.stgit@xxxxxxxxxxxxxxxxxxxxxx/ # v2
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
 fs/netfs/buffered_read.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index baf668fb4315..7679a68e8193 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -17,9 +17,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
 	struct folio *folio;
-	unsigned int iopos, account = 0;
 	pgoff_t start_page = rreq->start / PAGE_SIZE;
 	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+	size_t account = 0;
 	bool subreq_failed = false;

 	XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -39,23 +39,23 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 	 */
 	subreq = list_first_entry(&rreq->subrequests,
 				  struct netfs_io_subrequest, rreq_link);
-	iopos = 0;
 	subreq_failed = (subreq->error < 0);

 	trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);

 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_page) {
-		unsigned int pgpos, pgend;
+		loff_t pg_end;
 		bool pg_failed = false;

 		if (xas_retry(&xas, folio))
 			continue;

-		pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-		pgend = pgpos + folio_size(folio);
+		pg_end = folio_pos(folio) + folio_size(folio) - 1;

 		for (;;) {
+			loff_t sreq_end;
+
 			if (!subreq) {
 				pg_failed = true;
 				break;
@@ -63,11 +63,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 			if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
 				folio_start_fscache(folio);
 			pg_failed |= subreq_failed;
-			if (pgend < iopos + subreq->len)
+			sreq_end = subreq->start + subreq->len - 1;
+			if (pg_end < sreq_end)
 				break;

 			account += subreq->transferred;
-			iopos += subreq->len;
 			if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
 				subreq = list_next_entry(subreq, rreq_link);
 				subreq_failed = (subreq->error < 0);
@@ -75,7 +75,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				subreq = NULL;
 				subreq_failed = false;
 			}
-			if (pgend == iopos)
+
+			if (pg_end == sreq_end)
 				break;
 		}

--
2.35.1
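
For illustration only, here is a minimal userspace sketch (not part of the
patch, and not kernel code) of why the old offset-based arithmetic can go
wrong and how the absolute-position form avoids it.  The PAGE_SIZE value,
the folio geometry, and the simplified integer types are assumptions chosen
for demonstration:

/* Sketch: contrast the old request-relative maths with the new
 * absolute-file-position maths from netfs_rreq_unlock_folios(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Hypothetical 16KiB (4-page) folio at file position 0, with a
	 * request whose start falls inside the folio, at page 2. */
	unsigned long long folio_pos = 0;
	unsigned long long folio_size = 4 * PAGE_SIZE;
	unsigned long long rreq_start = 2 * PAGE_SIZE;

	unsigned long folio_index = folio_pos / PAGE_SIZE;	/* 0 */
	unsigned long start_page = rreq_start / PAGE_SIZE;	/* 2 */

	/* Old maths: offset from the start of the request.  When
	 * start_page lies inside the folio, folio_index - start_page
	 * goes negative and the unsigned arithmetic wraps. */
	unsigned int pgpos = (folio_index - start_page) * PAGE_SIZE;
	printf("old pgpos = %u (wrapped)\n", pgpos);

	/* New maths: absolute file position of the folio's last byte,
	 * well defined regardless of where the request starts. */
	unsigned long long pg_end = folio_pos + folio_size - 1;
	printf("new pg_end = %llu\n", pg_end);
	return 0;
}

Comparing pg_end against subreq->start + subreq->len - 1 then needs no
tracking of a running iopos at all, which is the simplification the patch
makes.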