On Mon, 2024-01-22 at 12:38 +0000, David Howells wrote:
> Filesystems should not be using folio->index not folio_index(folio) and

I think you mean "should be" here.

> folio->mapping, not folio_mapping() or folio_file_mapping() in filesystem
> code.
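
For anyone wondering why the wrappers exist at all: if I'm reading
pagemap.h correctly, folio_index() and folio_file_mapping() only diverge
from the raw fields when the folio sits in the swap cache, which a
filesystem's own folios never do. Roughly (a from-memory sketch, not the
exact upstream definition):

#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch of what the generic helper has to consider. */
static inline pgoff_t example_folio_index(struct folio *folio)
{
	/* For a folio in the swap cache, the index has to be derived
	 * from the swap entry, so the helper translates here... */
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	/* ...but for an ordinary pagecache folio this is all it does. */
	return folio->index;
}

So in fs/netfs the wrapper only adds a pointless test-and-branch, and the
direct dereference is both clearer and marginally cheaper.
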
> 
> Change this automagically with:
> 
> perl -p -i -e 's/folio_mapping[(]([^)]*)[)]/\1->mapping/g' fs/netfs/*.c
> perl -p -i -e 's/folio_file_mapping[(]([^)]*)[)]/\1->mapping/g' fs/netfs/*.c
> perl -p -i -e 's/folio_index[(]([^)]*)[)]/\1->index/g' fs/netfs/*.c
> 
> Reported-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
> Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
> cc: Jeff Layton <jlayton@xxxxxxxxxx>
> cc: linux-afs@xxxxxxxxxxxxxxxxxxx
> cc: linux-cachefs@xxxxxxxxxx
> cc: linux-cifs@xxxxxxxxxxxxxxx
> cc: linux-erofs@xxxxxxxxxxxxxxxx
> cc: linux-fsdevel@xxxxxxxxxxxxxxx
> ---
>  fs/netfs/buffered_read.c  | 12 ++++++------
>  fs/netfs/buffered_write.c | 10 +++++-----
>  fs/netfs/io.c             |  2 +-
>  fs/netfs/misc.c           |  2 +-
>  4 files changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
> index a59e7b2edaac..3298c29b5548 100644
> --- a/fs/netfs/buffered_read.c
> +++ b/fs/netfs/buffered_read.c
> @@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
>  		}
>  
>  		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
> -			if (folio_index(folio) == rreq->no_unlock_folio &&
> +			if (folio->index == rreq->no_unlock_folio &&
>  			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
>  				_debug("no unlock");
>  			else
> @@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
>   */
>  int netfs_read_folio(struct file *file, struct folio *folio)
>  {
> -	struct address_space *mapping = folio_file_mapping(folio);
> +	struct address_space *mapping = folio->mapping;
>  	struct netfs_io_request *rreq;
>  	struct netfs_inode *ctx = netfs_inode(mapping->host);
>  	struct folio *sink = NULL;
>  	int ret;
>  
> -	_enter("%lx", folio_index(folio));
> +	_enter("%lx", folio->index);
>  
>  	rreq = netfs_alloc_request(mapping, file,
>  				   folio_file_pos(folio), folio_size(folio),
> @@ -460,7 +460,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
>  		ret = PTR_ERR(rreq);
>  		goto error;
>  	}
> -	rreq->no_unlock_folio = folio_index(folio);
> +	rreq->no_unlock_folio = folio->index;
>  	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
>  
>  	ret = netfs_begin_cache_read(rreq, ctx);
> @@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
>  			     size_t offset, size_t len)
>  {
>  	struct netfs_io_request *rreq;
> -	struct address_space *mapping = folio_file_mapping(folio);
> +	struct address_space *mapping = folio->mapping;
>  	struct netfs_inode *ctx = netfs_inode(mapping->host);
>  	unsigned long long start = folio_pos(folio);
>  	size_t flen = folio_size(folio);
> @@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
>  		goto error;
>  	}
>  
> -	rreq->no_unlock_folio = folio_index(folio);
> +	rreq->no_unlock_folio = folio->index;
>  	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
>  	ret = netfs_begin_cache_read(rreq, ctx);
>  	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
> diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
> index 93dc76f34e39..e7f9ba6fb16b 100644
> --- a/fs/netfs/buffered_write.c
> +++ b/fs/netfs/buffered_write.c
> @@ -343,7 +343,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
>  			break;
>  		default:
>  			WARN(true, "Unexpected modify type %u ix=%lx\n",
> -			     howto, folio_index(folio));
> +			     howto, folio->index);
>  			ret = -EIO;
>  			goto error_folio_unlock;
>  		}
> @@ -648,7 +648,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
>  	xas_for_each(&xas, folio, last) {
>  		WARN(!folio_test_writeback(folio),
>  		     "bad %zx @%llx page %lx %lx\n",
> -		     wreq->len, wreq->start, folio_index(folio), last);
> +		     wreq->len, wreq->start, folio->index, last);
>  
>  		if ((finfo = netfs_folio_info(folio))) {
>  			/* Streaming writes cannot be redirtied whilst under
> @@ -795,7 +795,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
>  				continue;
>  			if (xa_is_value(folio))
>  				break;
> -			if (folio_index(folio) != index) {
> +			if (folio->index != index) {
>  				xas_reset(xas);
>  				break;
>  			}
> @@ -901,7 +901,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
>  	long count = wbc->nr_to_write;
>  	int ret;
>  
> -	_enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching);
> +	_enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
>  
>  	wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
>  				   NETFS_WRITEBACK);
> @@ -1047,7 +1047,7 @@ static ssize_t netfs_writepages_begin(struct address_space *mapping,
>  
>  	start = folio_pos(folio); /* May regress with THPs */
>  
> -	_debug("wback %lx", folio_index(folio));
> +	_debug("wback %lx", folio->index);
>  
>  	/* At this point we hold neither the i_pages lock nor the page lock:
>  	 * the page may be truncated or invalidated (changing page->mapping to
> diff --git a/fs/netfs/io.c b/fs/netfs/io.c
> index 4309edf33862..e8ff1e61ce79 100644
> --- a/fs/netfs/io.c
> +++ b/fs/netfs/io.c
> @@ -124,7 +124,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
>  			/* We might have multiple writes from the same huge
>  			 * folio, but we mustn't unlock a folio more than once.
>  			 */
> -			if (have_unlocked && folio_index(folio) <= unlocked)
> +			if (have_unlocked && folio->index <= unlocked)
>  				continue;
>  			unlocked = folio_next_index(folio) - 1;
>  			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
> diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
> index 0e3af37fc924..90051ced8e2a 100644
> --- a/fs/netfs/misc.c
> +++ b/fs/netfs/misc.c
> @@ -180,7 +180,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
>  	struct netfs_folio *finfo = NULL;
>  	size_t flen = folio_size(folio);
>  
> -	_enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
> +	_enter("{%lx},%zx,%zx", folio->index, offset, length);
>  
>  	folio_wait_fscache(folio);
> 

-- 
Jeff Layton <jlayton@xxxxxxxxxx>