From: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Subject: mm: introduce page_size()

Patch series "Make working with compound pages easier", v2.

These three patches add three helpers and convert the appropriate places
to use them.

This patch (of 3):

It's unnecessarily hard to find out the size of a potentially huge page.
Replace 'PAGE_SIZE << compound_order(page)' with page_size(page).

Link: http://lkml.kernel.org/r/20190721104612.19120-2-willy@xxxxxxxxxxxxx
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Acked-by: Michal Hocko <mhocko@xxxxxxxx>
Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Ira Weiny <ira.weiny@xxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm/mm/flush.c                           |    3 --
 arch/arm64/mm/flush.c                         |    3 --
 arch/ia64/mm/init.c                           |    2 -
 drivers/crypto/chelsio/chtls/chtls_io.c       |    5 +---
 drivers/staging/android/ion/ion_system_heap.c |    4 +--
 drivers/target/tcm_fc/tfc_io.c                |    3 --
 fs/io_uring.c                                 |    2 -
 include/linux/hugetlb.h                       |    2 -
 include/linux/mm.h                            |    6 +++++
 lib/iov_iter.c                                |    2 -
 mm/kasan/common.c                             |    8 ++-----
 mm/nommu.c                                    |    2 -
 mm/page_vma_mapped.c                          |    3 --
 mm/rmap.c                                     |    6 +----
 mm/slob.c                                     |    2 -
 mm/slub.c                                     |   18 ++++++++--------
 net/xdp/xsk.c                                 |    2 -
 17 files changed, 35 insertions(+), 38 deletions(-)

--- a/arch/arm64/mm/flush.c~mm-introduce-page_size
+++ a/arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);

 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);

--- a/arch/arm/mm/flush.c~mm-introduce-page_size
+++ a/arch/arm/mm/flush.c
@@ -204,8 +204,7 @@ void __flush_dcache_page(struct address_
	 * coherent with the kernels mapping.
	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {

--- a/arch/ia64/mm/init.c~mm-introduce-page_size
+++ a/arch/ia64/mm/init.c
@@ -64,7 +64,7 @@ __ia64_sync_icache_dcache (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */

-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }

--- a/drivers/crypto/chelsio/chtls/chtls_io.c~mm-introduce-page_size
+++ a/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1078,7 +1078,7 @@ new_buf:
 			bool merge;

 			if (page)
-				pg_size <<= compound_order(page);
+				pg_size = page_size(page);
 			if (off < pg_size &&
 			    skb_can_coalesce(skb, i, page, off)) {
 				merge = 1;
@@ -1105,8 +1105,7 @@ new_buf:
 							   __GFP_NORETRY,
 							   order);
 					if (page)
-						pg_size <<=
-							compound_order(page);
+						pg_size <<= order;
 				}
 				if (!page) {
 					page = alloc_page(gfp);

--- a/drivers/staging/android/ion/ion_system_heap.c~mm-introduce-page_size
+++ a/drivers/staging/android/ion/ion_system_heap.c
@@ -120,7 +120,7 @@ static int ion_system_heap_allocate(stru
 		if (!page)
 			goto free_pages;
 		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
+		size_remaining -= page_size(page);
 		max_order = compound_order(page);
 		i++;
 	}
@@ -133,7 +133,7 @@ static int ion_system_heap_allocate(stru

 	sg = table->sgl;
 	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg_set_page(sg, page, page_size(page), 0);
 		sg = sg_next(sg);
 		list_del(&page->lru);
 	}

--- a/drivers/target/tcm_fc/tfc_io.c~mm-introduce-page_size
+++ a/drivers/target/tcm_fc/tfc_io.c
@@ -136,8 +136,7 @@ int ft_queue_data_in(struct se_cmd *se_c
 					   page, off_in_page, tlen);
 			fr_len(fp) += tlen;
 			fp_skb(fp)->data_len += tlen;
-			fp_skb(fp)->truesize +=
-					PAGE_SIZE << compound_order(page);
+			fp_skb(fp)->truesize += page_size(page);
 		} else {
 			BUG_ON(!page);
 			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));

--- a/fs/io_uring.c~mm-introduce-page_size
+++ a/fs/io_uring.c
@@ -3319,7 +3319,7 @@ static int io_uring_mmap(struct file *fi
 	}

 	page = virt_to_head_page(ptr);
-	if (sz > (PAGE_SIZE << compound_order(page)))
+	if (sz > page_size(page))
 		return -EINVAL;

 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;

--- a/include/linux/hugetlb.h~mm-introduce-page_size
+++ a/include/linux/hugetlb.h
@@ -454,7 +454,7 @@ static inline pte_t arch_make_huge_pte(p
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }

 static inline unsigned hstate_index_to_shift(unsigned index)

--- a/include/linux/mm.h~mm-introduce-page_size
+++ a/include/linux/mm.h
@@ -805,6 +805,12 @@ static inline void set_compound_order(st
 	page[1].compound_order = order;
 }

+/* Returns the number of bytes in this potentially compound page. */
+static inline unsigned long page_size(struct page *page)
+{
+	return PAGE_SIZE << compound_order(page);
+}
+
 void free_compound_page(struct page *page);

 #ifdef CONFIG_MMU

--- a/lib/iov_iter.c~mm-introduce-page_size
+++ a/lib/iov_iter.c
@@ -878,7 +878,7 @@ static inline bool page_copy_sane(struct
 	head = compound_head(page);
 	v += (page - head) << PAGE_SHIFT;

-	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
+	if (likely(n <= v && v <= (page_size(head))))
 		return true;
 	WARN_ON(1);
 	return false;

--- a/mm/kasan/common.c~mm-introduce-page_size
+++ a/mm/kasan/common.c
@@ -338,8 +338,7 @@ void kasan_poison_slab(struct page *page
 	for (i = 0; i < (1 << compound_order(page)); i++)
 		page_kasan_tag_reset(page + i);
-	kasan_poison_shadow(page_address(page),
-			PAGE_SIZE << compound_order(page),
+	kasan_poison_shadow(page_address(page), page_size(page),
 			KASAN_KMALLOC_REDZONE);
 }

@@ -542,7 +541,7 @@ void * __must_check kasan_kmalloc_large(
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
 				KASAN_SHADOW_SCALE_SIZE);
-	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+	redzone_end = (unsigned long)ptr + page_size(page);

 	kasan_unpoison_shadow(ptr, size);
 	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
@@ -578,8 +577,7 @@ void kasan_poison_kfree(void *ptr, unsig
 			kasan_report_invalid_free(ptr, ip);
 			return;
 		}
-		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
-				KASAN_FREE_PAGE);
+		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 	} else {
 		__kasan_slab_free(page->slab_cache, ptr, ip, false);
 	}

--- a/mm/nommu.c~mm-introduce-page_size
+++ a/mm/nommu.c
@@ -108,7 +108,7 @@ unsigned int kobjsize(const void *objp)
 	 * The ksize() function is only guaranteed to work for pointers
 	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
-	return PAGE_SIZE << compound_order(page);
+	return page_size(page);
 }

 /**

--- a/mm/page_vma_mapped.c~mm-introduce-page_size
+++ a/mm/page_vma_mapped.c
@@ -153,8 +153,7 @@ bool page_vma_mapped_walk(struct page_vm
 	if (unlikely(PageHuge(pvmw->page))) {
 		/* when pud is not present, pte will be NULL */
-		pvmw->pte = huge_pte_offset(mm, pvmw->address,
-					    PAGE_SIZE << compound_order(page));
+		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
 			return false;

--- a/mm/rmap.c~mm-introduce-page_size
+++ a/mm/rmap.c
@@ -898,8 +898,7 @@ static bool page_mkclean_one(struct page
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 				0, vma, vma->vm_mm, address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	mmu_notifier_invalidate_range_start(&range);

 	while (page_vma_mapped_walk(&pvmw)) {
@@ -1372,8 +1371,7 @@ static bool try_to_unmap_one(struct page
 	 */
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
 				address,
-				min(vma->vm_end, address +
-				    (PAGE_SIZE << compound_order(page))));
+				min(vma->vm_end, address + page_size(page)));
 	if (PageHuge(page)) {
 		/*
 		 * If sharing is possible, start and end will be adjusted

--- a/mm/slob.c~mm-introduce-page_size
+++ a/mm/slob.c
@@ -539,7 +539,7 @@ size_t __ksize(const void *block)
 	sp = virt_to_page(block);
 	if (unlikely(!PageSlab(sp)))
-		return PAGE_SIZE << compound_order(sp);
+		return page_size(sp);

 	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	m = (unsigned int *)(block - align);

--- a/mm/slub.c~mm-introduce-page_size
+++ a/mm/slub.c
@@ -829,7 +829,7 @@ static int slab_pad_check(struct kmem_ca
 		return 1;

 	start = page_address(page);
-	length = PAGE_SIZE << compound_order(page);
+	length = page_size(page);
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -1074,13 +1074,14 @@ static void setup_object_debug(struct km
 	init_tracking(s, object);
 }

-static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
+static
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
 	if (!(s->flags & SLAB_POISON))
 		return;

 	metadata_access_enable();
-	memset(addr, POISON_INUSE, PAGE_SIZE << order);
+	memset(addr, POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }

@@ -1340,8 +1341,8 @@ slab_flags_t kmem_cache_flags(unsigned i
 #else /* !CONFIG_SLUB_DEBUG */
 static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
-static inline void setup_page_debug(struct kmem_cache *s,
-			void *addr, int order) {}
+static inline
+void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr) {}

 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
@@ -1639,7 +1640,7 @@ static struct page *allocate_slab(struct
 	struct kmem_cache_order_objects oo = s->oo;
 	gfp_t alloc_gfp;
 	void *start, *p, *next;
-	int idx, order;
+	int idx;
 	bool shuffle;

 	flags &= gfp_allowed_mask;
@@ -1673,7 +1674,6 @@ static struct page *allocate_slab(struct
 	page->objects = oo_objects(oo);

-	order = compound_order(page);
 	page->slab_cache = s;
 	__SetPageSlab(page);
 	if (page_is_pfmemalloc(page))
@@ -1683,7 +1683,7 @@ static struct page *allocate_slab(struct
 	start = page_address(page);

-	setup_page_debug(s, start, order);
+	setup_page_debug(s, page, start);

 	shuffle = shuffle_freelist(s, page);

@@ -3932,7 +3932,7 @@ size_t __ksize(const void *object)
 	if (unlikely(!PageSlab(page))) {
 		WARN_ON(!PageCompound(page));
-		return PAGE_SIZE << compound_order(page);
+		return page_size(page);
 	}

 	return slab_ksize(page->slab_cache);

--- a/net/xdp/xsk.c~mm-introduce-page_size
+++ a/net/xdp/xsk.c
@@ -977,7 +977,7 @@ static int xsk_mmap(struct file *file, s
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
 	qpg = virt_to_head_page(q->ring);
-	if (size > (PAGE_SIZE << compound_order(qpg)))
+	if (size > page_size(qpg))
 		return -EINVAL;

 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
_
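
For readers who want to poke at the arithmetic outside the tree, below is a
minimal userspace sketch of what the new helper computes.  PAGE_SHIFT,
PAGE_SIZE and compound_order() are mocked here (the real page_size() takes a
struct page and lives in include/linux/mm.h); it only demonstrates that
page_size() is byte-for-byte the old 'PAGE_SIZE << compound_order(page)'
expression, e.g. an order-9 compound page on a 4KiB-page system is 2MiB.

/*
 * Userspace sketch only: PAGE_SHIFT, PAGE_SIZE and compound_order()
 * are mocked; the kernel helper operates on struct page.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4KiB base pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Mock of compound_order(): takes the order directly, not a page. */
static unsigned int compound_order(unsigned int order)
{
	return order;
}

/* Same arithmetic as the new helper in include/linux/mm.h. */
static unsigned long page_size(unsigned int order)
{
	return PAGE_SIZE << compound_order(order);
}

int main(void)
{
	assert(page_size(0) == 4096UL);		/* base page */
	assert(page_size(9) == 2UL << 20);	/* order-9: 2MiB THP */
	printf("order 0: %lu bytes, order 9: %lu bytes\n",
	       page_size(0), page_size(9));
	return 0;
}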