Hi, Matthew,

On 10.05.2019 21:12, Matthew Wilcox wrote:
> From: "Matthew Wilcox (Oracle)" <willy@xxxxxxxxxxxxx>
>
> It's unnecessarily hard to find out the size of a potentially large page.
> Replace 'PAGE_SIZE << compound_order(page)' with 'page_size(page)'.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> ---
>  arch/arm/mm/flush.c                           | 3 +--
>  arch/arm64/mm/flush.c                         | 3 +--
>  arch/ia64/mm/init.c                           | 2 +-
>  drivers/staging/android/ion/ion_system_heap.c | 4 ++--
>  drivers/target/tcm_fc/tfc_io.c                | 3 +--
>  fs/io_uring.c                                 | 2 +-
>  include/linux/hugetlb.h                       | 2 +-
>  include/linux/mm.h                            | 9 +++++++++
>  lib/iov_iter.c                                | 2 +-
>  mm/kasan/common.c                             | 8 +++-----
>  mm/nommu.c                                    | 2 +-
>  mm/page_vma_mapped.c                          | 3 +--
>  mm/rmap.c                                     | 6 ++----
>  mm/slob.c                                     | 2 +-
>  mm/slub.c                                     | 4 ++--
>  net/xdp/xsk.c                                 | 2 +-
>  16 files changed, 29 insertions(+), 28 deletions(-)
>
> diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
> index 58469623b015..c68a120de28b 100644
> --- a/arch/arm/mm/flush.c
> +++ b/arch/arm/mm/flush.c
> @@ -207,8 +207,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
>  	 * coherent with the kernels mapping.
>  	 */
>  	if (!PageHighMem(page)) {
> -		size_t page_size = PAGE_SIZE << compound_order(page);
> -		__cpuc_flush_dcache_area(page_address(page), page_size);
> +		__cpuc_flush_dcache_area(page_address(page), page_size(page));
>  	} else {
>  		unsigned long i;
>  		if (cache_is_vipt_nonaliasing()) {
> diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
> index 5c9073bace83..280fdbc3bfa5 100644
> --- a/arch/arm64/mm/flush.c
> +++ b/arch/arm64/mm/flush.c
> @@ -67,8 +67,7 @@ void __sync_icache_dcache(pte_t pte)
>  	struct page *page = pte_page(pte);
>
>  	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
> -		sync_icache_aliases(page_address(page),
> -				    PAGE_SIZE << compound_order(page));
> +		sync_icache_aliases(page_address(page), page_size(page));
>  }
>  EXPORT_SYMBOL_GPL(__sync_icache_dcache);
>
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index d28e29103bdb..cc4061cd9899 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -63,7 +63,7 @@ __ia64_sync_icache_dcache (pte_t pte)
>  	if (test_bit(PG_arch_1, &page->flags))
>  		return;				/* i-cache is already coherent with d-cache */
>
> -	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
> +	flush_icache_range(addr, addr + page_size(page));
>  	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
>  }
>
> diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
> index aa8d8425be25..b83a1d16bd89 100644
> --- a/drivers/staging/android/ion/ion_system_heap.c
> +++ b/drivers/staging/android/ion/ion_system_heap.c
> @@ -120,7 +120,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>  		if (!page)
>  			goto free_pages;
>  		list_add_tail(&page->lru, &pages);
> -		size_remaining -= PAGE_SIZE << compound_order(page);
> +		size_remaining -= page_size(page);
>  		max_order = compound_order(page);
>  		i++;
>  	}
> @@ -133,7 +133,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
>
>  	sg = table->sgl;
>  	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
> -		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
> +		sg_set_page(sg, page, page_size(page), 0);
>  		sg = sg_next(sg);
>  		list_del(&page->lru);
>  	}
> diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
> index 1eb1f58e00e4..83c1ec65dbcc 100644
> --- a/drivers/target/tcm_fc/tfc_io.c
> +++ b/drivers/target/tcm_fc/tfc_io.c
> @@ -148,8 +148,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
>  					   page, off_in_page, tlen);
>  			fr_len(fp) += tlen;
>  			fp_skb(fp)->data_len += tlen;
> -			fp_skb(fp)->truesize +=
> -					PAGE_SIZE << compound_order(page);
> +			fp_skb(fp)->truesize += page_size(page);
>  		} else {
>  			BUG_ON(!page);
>  			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index fdc18321d70c..2c37da095517 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -2891,7 +2891,7 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
>  	}
>
>  	page = virt_to_head_page(ptr);
> -	if (sz > (PAGE_SIZE << compound_order(page)))
> +	if (sz > page_size(page))
>  		return -EINVAL;
>
>  	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index edf476c8cfb9..2e909072a41f 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -472,7 +472,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
>  static inline struct hstate *page_hstate(struct page *page)
>  {
>  	VM_BUG_ON_PAGE(!PageHuge(page), page);
> -	return size_to_hstate(PAGE_SIZE << compound_order(page));
> +	return size_to_hstate(page_size(page));
>  }
>
>  static inline unsigned hstate_index_to_shift(unsigned index)
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 0e8834ac32b7..0208f77bab63 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -772,6 +772,15 @@ static inline void set_compound_order(struct page *page, unsigned int order)
>  	page[1].compound_order = order;
>  }
>
> +/*
> + * Returns the number of bytes in this potentially compound page.
> + * Must be called with the head page, not a tail page.
> + */
> +static inline unsigned long page_size(struct page *page)
> +{

Maybe we should underline the commented head-page limitation with a VM_BUG_ON()?
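Something like the below, perhaps? (Just an untested sketch: PageTail() is one
way to spell the check, and the body is the replacement the changelog
describes.)

	static inline unsigned long page_size(struct page *page)
	{
		/*
		 * compound_order() is only meaningful on the head page; a
		 * tail page would silently yield PAGE_SIZE here, so catch
		 * it early.
		 */
		VM_BUG_ON_PAGE(PageTail(page), page);
		return PAGE_SIZE << compound_order(page);
	}

Kirill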