From: Dave Chinner <dchinner@xxxxxxxxxx>

Source kernel commit: de631e1a8b71017b8a12b57d07db82e4052555af

Pankaj Raghav reported that when filesystem block size is larger
than page size, the xattr code can use kmalloc() for high order
allocations. This triggers a useless warning in the allocator as it
is a __GFP_NOFAIL allocation here:

static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	struct page *page;

	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
>>>>	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
...

Fix this by changing all these call sites to use kvmalloc(), which
will strip the NOFAIL from the kmalloc attempt and if that fails
will do a __GFP_NOFAIL vmalloc().

This is not an issue that production systems will see as
filesystems with block size > page size cannot be mounted by the
kernel; Pankaj is developing this functionality right now.

Reported-by: Pankaj Raghav <kernel@xxxxxxxxxxxxxxxx>
Fixes: f078d4ea8276 ("xfs: convert kmem_alloc() to kmalloc()")
Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20240822135018.1931258-8-kernel@xxxxxxxxxxxxxxxx
Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>
Reviewed-by: Pankaj Raghav <p.raghav@xxxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Acked-by: Darrick J. Wong <djwong@xxxxxxxxxx>
Reviewed-by: Daniel Gomez <da.gomez@xxxxxxxxxxx>
Reviewed-by: Dave Chinner <dchinner@xxxxxxxxxx>
Reviewed-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Christian Brauner <brauner@xxxxxxxxxx>
---
 include/kmem.h         |  6 ++++++
 libxfs/xfs_attr_leaf.c | 15 ++++++---------
 2 files changed, 12 insertions(+), 9 deletions(-)

diff --git a/include/kmem.h b/include/kmem.h
index 8dfb2fb0b45020..8739d824008e2a 100644
--- a/include/kmem.h
+++ b/include/kmem.h
@@ -59,12 +59,18 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 }
 
 #define kzalloc(size, gfp)	kvmalloc((size), (gfp) | __GFP_ZERO)
+#define kvzalloc(size, gfp)	kzalloc((size), (gfp))
 
 static inline void kfree(const void *ptr)
 {
 	free((void *)ptr);
 }
 
+static inline void kvfree(const void *ptr)
+{
+	kfree(ptr);
+}
+
 __attribute__((format(printf,2,3)))
 char *kasprintf(gfp_t gfp, const char *fmt, ...);
 
diff --git a/libxfs/xfs_attr_leaf.c b/libxfs/xfs_attr_leaf.c
index 97b71b6500bdc9..db2e48d719d36f 100644
--- a/libxfs/xfs_attr_leaf.c
+++ b/libxfs/xfs_attr_leaf.c
@@ -1135,10 +1135,7 @@ xfs_attr3_leaf_to_shortform(
 
 	trace_xfs_attr_leaf_to_sf(args);
 
-	tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
-	if (!tmpbuffer)
-		return -ENOMEM;
-
+	tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
 	memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
 
 	leaf = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1202,7 +1199,7 @@ xfs_attr3_leaf_to_shortform(
 	error = 0;
 
 out:
-	kfree(tmpbuffer);
+	kvfree(tmpbuffer);
 	return error;
 }
 
@@ -1610,7 +1607,7 @@ xfs_attr3_leaf_compact(
 
 	trace_xfs_attr_leaf_compact(args);
 
-	tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
+	tmpbuffer = kvmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
 	memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
 	memset(bp->b_addr, 0, args->geo->blksize);
 	leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
@@ -1648,7 +1645,7 @@ xfs_attr3_leaf_compact(
 	 */
 	xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
 
-	kfree(tmpbuffer);
+	kvfree(tmpbuffer);
 }
 
 /*
@@ -2327,7 +2324,7 @@ xfs_attr3_leaf_unbalance(
 		struct xfs_attr_leafblock *tmp_leaf;
 		struct xfs_attr3_icleaf_hdr tmphdr;
 
-		tmp_leaf = kzalloc(state->args->geo->blksize,
+		tmp_leaf = kvzalloc(state->args->geo->blksize,
 				GFP_KERNEL | __GFP_NOFAIL);
 
 		/*
@@ -2368,7 +2365,7 @@ xfs_attr3_leaf_unbalance(
 		}
 		memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
 		savehdr = tmphdr; /* struct copy */
-		kfree(tmp_leaf);
+		kvfree(tmp_leaf);
 	}
 
 	xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
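
For readers unfamiliar with the fallback the commit message describes, the
following is a minimal userspace sketch of the kvmalloc() behaviour relied on
here: strip __GFP_NOFAIL from the kmalloc-style attempt (so it may fail
quietly instead of tripping the order > 1 warning), then satisfy the NOFAIL
guarantee via a vmalloc-style path. This is NOT the kernel's kvmalloc()
implementation and not the xfsprogs kmem.h shim; the ex_* helper names, the
EX_GFP_NOFAIL flag value, and the page-size/order limits are assumptions made
up for illustration only.

#include <stdlib.h>
#include <stdio.h>

#define EX_GFP_NOFAIL	(1u << 0)	/* hypothetical stand-in for __GFP_NOFAIL */
#define EX_PAGE_SIZE	4096u
#define EX_MAX_ORDER	1u		/* the warning fires above order-1 NOFAIL requests */

/*
 * Stand-in for a kmalloc() attempt that is allowed to fail: with NOFAIL
 * stripped, a high-order request may simply return NULL instead of
 * warning and retrying forever.
 */
static void *ex_kmalloc_may_fail(size_t size)
{
	if (size > ((size_t)EX_PAGE_SIZE << EX_MAX_ORDER))
		return NULL;	/* pretend contiguous high-order pages are unavailable */
	return malloc(size);
}

/* Stand-in for the vmalloc() fallback: the "cannot fail" path. */
static void *ex_vmalloc_nofail(size_t size)
{
	void *p = malloc(size);

	if (!p)
		abort();	/* a NOFAIL allocation never returns NULL */
	return p;
}

/*
 * kvmalloc() as described above: try the kmalloc path with NOFAIL
 * stripped, and only if that fails honour NOFAIL via the vmalloc path.
 */
static void *ex_kvmalloc(size_t size, unsigned int flags)
{
	void *p = ex_kmalloc_may_fail(size);

	if (p || !(flags & EX_GFP_NOFAIL))
		return p;
	return ex_vmalloc_nofail(size);
}

int main(void)
{
	/* A 64k xattr buffer is order-4 with 4k pages: too big for a NOFAIL kmalloc. */
	void *buf = ex_kvmalloc(64 * 1024, EX_GFP_NOFAIL);

	printf("allocated %p\n", buf);
	free(buf);
	return 0;
}

The point of the sketch is only the control flow: because NOFAIL is stripped
before the small-allocation attempt, the allocator never sees a high-order
NOFAIL request, so the WARN_ON_ONCE() quoted above cannot trigger.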