In later patches we will add other accessor APIs which will take inode and folio to operate over struct iomap_page. Since those functions will need the folio's private data (iomap_page), this patch moves the detaching of the folio's private data to the end, just before calling kfree(iop). Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@xxxxxxxxx> --- fs/iomap/buffered-io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 4567bdd4fff9..6fffda355c45 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -71,7 +71,7 @@ static struct iomap_page *iomap_iop_alloc(struct inode *inode, static void iomap_iop_free(struct folio *folio) { - struct iomap_page *iop = folio_detach_private(folio); + struct iomap_page *iop = to_iomap_page(folio); struct inode *inode = folio->mapping->host; unsigned int nr_blocks = i_blocks_per_folio(inode, folio); @@ -81,6 +81,7 @@ static void iomap_iop_free(struct folio *folio) WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending)); WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) != folio_test_uptodate(folio)); + folio_detach_private(folio); kfree(iop); } -- 2.40.1