Rename the function to kmem_to_page and move it to kmem.h together with
our kmem_large allocator that may either return kmalloced or vmalloc
pages.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Dave Chinner <dchinner@xxxxxxxxxx>
---
 fs/xfs/kmem.h    |  8 ++++++++
 fs/xfs/xfs_buf.c | 13 +------------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 8e6b3ba81c03..267655acd426 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -124,4 +124,12 @@ kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
 	return kmem_zone_alloc(zone, flags | KM_ZERO);
 }
 
+static inline struct page *
+kmem_to_page(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		return vmalloc_to_page(addr);
+	return virt_to_page(addr);
+}
+
 #endif /* __XFS_SUPPORT_KMEM_H__ */
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 548344e25128..ade6ec28e1c9 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -933,17 +933,6 @@ xfs_buf_set_empty(
 	bp->b_maps[0].bm_len = bp->b_length;
 }
 
-static inline struct page *
-mem_to_page(
-	void		*addr)
-{
-	if ((!is_vmalloc_addr(addr))) {
-		return virt_to_page(addr);
-	} else {
-		return vmalloc_to_page(addr);
-	}
-}
-
 int
 xfs_buf_associate_memory(
 	xfs_buf_t		*bp,
@@ -976,7 +965,7 @@ xfs_buf_associate_memory(
 	bp->b_offset = offset;
 
 	for (i = 0; i < bp->b_page_count; i++) {
-		bp->b_pages[i] = mem_to_page((void *)pageaddr);
+		bp->b_pages[i] = kmem_to_page((void *)pageaddr);
 		pageaddr += PAGE_SIZE;
 	}
 
-- 
2.20.1
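
[Not part of the patch: a minimal usage sketch of the new helper. It assumes
a caller holding a buffer that may have come from either kmalloc() or
vmalloc(), which is the situation the commit message describes for large
allocations; the names fill_pages, buf and nbytes are hypothetical and only
for illustration.]

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Translate each page-sized chunk of a buffer to its backing struct page.
 * kmem_to_page() hides whether the buffer is direct-mapped (kmalloc) or
 * lives in the vmalloc area, so one loop works for both cases.
 */
static void fill_pages(struct page **pages, void *buf, size_t nbytes)
{
	unsigned long addr = (unsigned long)buf & PAGE_MASK;
	unsigned int i, npages = DIV_ROUND_UP(nbytes, PAGE_SIZE);

	for (i = 0; i < npages; i++) {
		pages[i] = kmem_to_page((void *)addr);
		addr += PAGE_SIZE;
	}
}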