Split xfs_buf_allocate_memory into one helper that allocates from the
slab cache and one that allocates using the page allocator.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 fs/xfs/xfs_buf.c | 83 +++++++++++++++++++++++++-----------------------
 1 file changed, 44 insertions(+), 39 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 80be0333f077c0..ac85ec6f0a2fab 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -347,11 +347,41 @@ xfs_buf_free(
 	kmem_cache_free(xfs_buf_zone, bp);
 }
 
+static int
+xfs_buf_alloc_slab(
+	struct xfs_buf		*bp,
+	unsigned int		flags)
+{
+	struct xfs_buftarg	*btp = bp->b_target;
+	int			align = xfs_buftarg_dma_alignment(btp);
+	size_t			size = BBTOB(bp->b_length);
+	xfs_km_flags_t		km_flags = KM_NOFS;
+
+	if (!(flags & XBF_READ))
+		km_flags |= KM_ZERO;
+	bp->b_addr = kmem_alloc_io(size, align, km_flags);
+	if (!bp->b_addr)
+		return -ENOMEM;
+	if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
+	    ((unsigned long)bp->b_addr & PAGE_MASK)) {
+		/* b_addr spans two pages - use alloc_page instead */
+		kmem_free(bp->b_addr);
+		bp->b_addr = NULL;
+		return -ENOMEM;
+	}
+	bp->b_offset = offset_in_page(bp->b_addr);
+	bp->b_pages = bp->b_page_array;
+	bp->b_pages[0] = kmem_to_page(bp->b_addr);
+	bp->b_page_count = 1;
+	bp->b_flags |= _XBF_KMEM;
+	return 0;
+}
+
 /*
  * Allocates all the pages for buffer in question and builds it's page list.
  */
-STATIC int
-xfs_buf_allocate_memory(
+static int
+xfs_buf_alloc_pages(
 	struct xfs_buf		*bp,
 	uint			flags)
 {
@@ -361,47 +391,14 @@ xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
-	xfs_km_flags_t		kmflag_mask = 0;
 
 	/*
 	 * assure zeroed buffer for non-read cases.
 	 */
-	if (!(flags & XBF_READ)) {
-		kmflag_mask |= KM_ZERO;
+	if (!(flags & XBF_READ))
 		gfp_mask |= __GFP_ZERO;
-	}
 
-	/*
-	 * for buffers that are contained within a single page, just allocate
-	 * the memory from the heap - there's no need for the complexity of
-	 * page arrays to keep allocation down to order 0.
-	 */
 	size = BBTOB(bp->b_length);
-	if (size < PAGE_SIZE) {
-		int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask,
-					   KM_NOFS | kmflag_mask);
-		if (!bp->b_addr) {
-			/* low memory - use alloc_page loop instead */
-			goto use_alloc_page;
-		}
-
-		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
-		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
-			/* b_addr spans two pages - use alloc_page instead */
-			kmem_free(bp->b_addr);
-			bp->b_addr = NULL;
-			goto use_alloc_page;
-		}
-		bp->b_offset = offset_in_page(bp->b_addr);
-		bp->b_pages = bp->b_page_array;
-		bp->b_pages[0] = kmem_to_page(bp->b_addr);
-		bp->b_page_count = 1;
-		bp->b_flags |= _XBF_KMEM;
-		return 0;
-	}
-
-use_alloc_page:
 	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
 	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
 								>> PAGE_SHIFT;
@@ -720,9 +717,17 @@ xfs_buf_get_map(
 	if (error)
 		return error;
 
-	error = xfs_buf_allocate_memory(new_bp, flags);
-	if (error)
-		goto out_free_buf;
+	/*
+	 * For buffers that are contained within a single page, just allocate
+	 * the memory from the heap - there's no need for the complexity of
+	 * page arrays to keep allocation down to order 0.
+	 */
+	if (BBTOB(new_bp->b_length) >= PAGE_SIZE ||
+	    xfs_buf_alloc_slab(new_bp, flags) < 0) {
+		error = xfs_buf_alloc_pages(new_bp, flags);
+		if (error)
+			goto out_free_buf;
+	}
 
 	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
 	if (error)
-- 
2.30.2
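
A note on the page-crossing check in xfs_buf_alloc_slab() above: masking
an address with PAGE_MASK strips the in-page offset bits and yields the
base address of the page containing it, so the first and last byte of the
allocation lie in the same page exactly when the two masked values compare
equal. The userspace sketch below is illustrative only and not part of the
patch; DEMO_PAGE_SIZE, DEMO_PAGE_MASK and spans_two_pages() are made-up
names, and a 4k page size is assumed purely for the demo.

/*
 * Illustrative sketch, not part of the patch: reproduces the
 * page-crossing test from xfs_buf_alloc_slab() on plain malloc'd
 * memory.  DEMO_PAGE_SIZE assumes 4k pages purely for the demo.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))	/* page base address mask */

/* True if [addr, addr + size) straddles a page boundary. */
static int spans_two_pages(void *addr, size_t size)
{
	unsigned long first = (unsigned long)addr & DEMO_PAGE_MASK;
	unsigned long last = ((unsigned long)addr + size - 1) & DEMO_PAGE_MASK;

	return first != last;
}

int main(void)
{
	char *buf = malloc(DEMO_PAGE_SIZE);

	if (!buf)
		return 1;
	printf("512 bytes at %p %s a page boundary\n", (void *)buf,
	       spans_two_pages(buf, 512) ? "straddle" : "do not straddle");
	free(buf);
	return 0;
}

xfs_buf_get_map() relies on the same property: only slab allocations that
provably stay within one page take the _XBF_KMEM path, and everything else
falls back to xfs_buf_alloc_pages().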