As David suggested: "We can allow all orders up to MAX_PAGECACHE_ORDER,
since shmem_mapping_size_orders() handles it properly". Therefore, drop
the 'MAX_PAGECACHE_ORDER' check.

Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
 mm/shmem.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index a3203cf8860f..d54b24d65193 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -590,19 +590,19 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 					  struct vm_area_struct *vma,
 					  unsigned long vm_flags)
 {
+	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
+		0 : BIT(HPAGE_PMD_ORDER);
 	unsigned long within_size_orders;
 	unsigned int order;
 	pgoff_t aligned_index;
 	loff_t i_size;
 
-	if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
-		return 0;
 	if (!S_ISREG(inode->i_mode))
 		return 0;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return 0;
 	if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
-		return BIT(HPAGE_PMD_ORDER);
+		return maybe_pmd_order;
 
 	/*
 	 * The huge order allocation for anon shmem is controlled through
@@ -619,12 +619,12 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
 		if (vma)
-			return BIT(HPAGE_PMD_ORDER);
+			return maybe_pmd_order;
 
 		return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
 	case SHMEM_HUGE_WITHIN_SIZE:
 		if (vma)
-			within_size_orders = BIT(HPAGE_PMD_ORDER);
+			within_size_orders = maybe_pmd_order;
 		else
 			within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
								       index, write_end);
@@ -642,7 +642,7 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
 		if (vm_flags & VM_HUGEPAGE)
-			return BIT(HPAGE_PMD_ORDER);
+			return maybe_pmd_order;
 		fallthrough;
 	default:
 		return 0;
-- 
2.39.3
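
Not part of the patch: a minimal standalone userspace sketch of the pattern the
change switches to. The constants below are mock placeholders (the real
HPAGE_PMD_ORDER and MAX_PAGECACHE_ORDER are config/arch-dependent), so this only
illustrates the clamping idea and is not kernel code.

/*
 * Standalone sketch, not kernel code: mock values stand in for the real,
 * config-dependent HPAGE_PMD_ORDER / MAX_PAGECACHE_ORDER.
 */
#include <stdio.h>

#define BIT(n)			(1U << (n))
#define HPAGE_PMD_ORDER		9	/* mock: PMD order for 4K base pages */
#define MAX_PAGECACHE_ORDER	8	/* mock: pretend the page cache caps below PMD order */

int main(void)
{
	/*
	 * Same pattern as the patch: instead of bailing out of the whole
	 * function early, precompute the PMD-order bitmap and clamp it to 0
	 * when the PMD order cannot be represented in the page cache.
	 */
	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
		0 : BIT(HPAGE_PMD_ORDER);

	/* Paths that used to "return BIT(HPAGE_PMD_ORDER)" now return this. */
	printf("maybe_pmd_order = 0x%x\n", maybe_pmd_order);

	/*
	 * Paths that call shmem_mapping_size_orders() are unaffected by
	 * dropping the early check, since (per the commit message) that
	 * helper already handles orders up to MAX_PAGECACHE_ORDER itself.
	 */
	return 0;
}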