From: Zi Yan <ziy@xxxxxxxxxx>

For the other MAX_ORDER uses described below, there is either no need
to convert the static arrays involved into dynamic ones, or doing so
would be too much hassle. Add MIN_MAX_ORDER to serve as a compile-time
constant in place of MAX_ORDER.

The ARM64 hypervisor maintains its own free page list and does not
import any core kernel symbols, so the soon-to-be runtime variable
MAX_ORDER is not accessible in ARM64 hypervisor code. There is also no
need for the hypervisor to allocate very large pages.

In SLAB/SLOB/SLUB, the 2-D array kmalloc_caches uses MAX_ORDER in its
second dimension. It would be too much hassle to allocate memory for
kmalloc_caches before any proper memory allocator is set up.

Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Quentin Perret <qperret@xxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: kvmarm@xxxxxxxxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 arch/arm64/kvm/hyp/include/nvhe/gfp.h | 2 +-
 arch/arm64/kvm/hyp/nvhe/page_alloc.c  | 2 +-
 include/linux/mmzone.h                | 3 +++
 include/linux/slab.h                  | 8 ++++----
 mm/slab.c                             | 2 +-
 mm/slub.c                             | 6 +++---
 6 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
index fe5472a184a3..29b92f68ab69 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/gfp.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -16,7 +16,7 @@ struct hyp_pool {
	 * API at EL2.
	 */
	hyp_spinlock_t lock;
-	struct list_head free_area[MAX_ORDER + 1];
+	struct list_head free_area[MIN_MAX_ORDER + 1];
	phys_addr_t range_start;
	phys_addr_t range_end;
	unsigned short max_order;
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index d40f0b30b534..7ebbac3e2e76 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
	int i;

	hyp_spin_lock_init(&pool->lock);
-	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
+	pool->max_order = min(MIN_MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 60d8cce2aed8..b5774e4c2700 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -26,10 +26,13 @@
 /* Free memory management - zoned buddy allocator. */
 #ifdef CONFIG_SET_MAX_ORDER
 #define MAX_ORDER CONFIG_SET_MAX_ORDER
+#define MIN_MAX_ORDER CONFIG_SET_MAX_ORDER
 #elif CONFIG_ARCH_FORCE_MAX_ORDER != 0
 #define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#define MIN_MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
 #else
 #define MAX_ORDER 10
+#define MIN_MAX_ORDER MAX_ORDER
 #endif

 #define MAX_ORDER_NR_PAGES (1 << MAX_ORDER)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 568b5dfb3bd9..e34b2c9bda09 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -251,8 +251,8 @@ static inline unsigned int arch_slab_minalign(void)
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
-#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
-				(MAX_ORDER + PAGE_SHIFT) : 25)
+#define KMALLOC_SHIFT_HIGH	((MIN_MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+				(MIN_MAX_ORDER + PAGE_SHIFT) : 25)
 #define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	5
@@ -265,7 +265,7 @@ static inline unsigned int arch_slab_minalign(void)
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
 #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MIN_MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
@@ -278,7 +278,7 @@ static inline unsigned int arch_slab_minalign(void)
 * be allocated from the same page.
 */
 #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX	(MIN_MAX_ORDER + PAGE_SHIFT)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW	3
 #endif
diff --git a/mm/slab.c b/mm/slab.c
index 530f418a4930..23798c32bb38 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -466,7 +466,7 @@ static int __init slab_max_order_setup(char *str)
 {
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
-				min(slab_max_order, MAX_ORDER);
+				min(slab_max_order, MIN_MAX_ORDER);
	slab_max_order_set = true;

	return 1;
diff --git a/mm/slub.c b/mm/slub.c
index 5acf5407cbc6..940fe48ea298 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3876,8 +3876,8 @@ static inline int calculate_order(unsigned int size)
	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
-	order = calc_slab_order(size, 1, MAX_ORDER, 1);
-	if (order <= MAX_ORDER)
+	order = calc_slab_order(size, 1, MIN_MAX_ORDER, 1);
+	if (order <= MIN_MAX_ORDER)
		return order;
	return -ENOSYS;
 }
@@ -4388,7 +4388,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 static int __init setup_slub_max_order(char *str)
 {
	get_option(&str, (int *)&slub_max_order);
-	slub_max_order = min_t(unsigned int, slub_max_order, MAX_ORDER);
+	slub_max_order = min_t(unsigned int, slub_max_order, MIN_MAX_ORDER);

	return 1;
 }
--
2.35.1
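P.S. for reviewers: every hunk above follows the same pattern, namely
that a static array (or a clamp guarding an index into one) needs a
compile-time bound even after MAX_ORDER becomes a runtime variable.
Below is a minimal standalone userspace C sketch of that pattern, not
kernel code; the names and values in it (MIN_MAX_ORDER, max_order,
free_area) are illustrative stand-ins for the kernel symbols, and the
runtime max_order variable is a hypothetical placeholder for what the
rest of the series introduces.

	#include <stdio.h>

	#define MIN_MAX_ORDER 10	/* compile-time bound, as in the patch */

	static int max_order = 10;	/* stand-in for the soon-to-be runtime
					 * MAX_ORDER; may be raised at boot */

	/*
	 * A static array must be sized by a constant expression, so it is
	 * dimensioned with MIN_MAX_ORDER rather than the runtime max_order.
	 */
	static const char *free_area[MIN_MAX_ORDER + 1];

	int main(void)
	{
		/*
		 * Callers that used to clamp against MAX_ORDER now clamp
		 * against MIN_MAX_ORDER, keeping indices within free_area.
		 */
		int requested = 15;
		int order = requested < MIN_MAX_ORDER ? requested : MIN_MAX_ORDER;

		free_area[order] = "highest usable order";
		printf("order %d (runtime max_order = %d)\n", order, max_order);
		return 0;
	}

The clamp in main() is the same min() pattern the patch applies in
slab_max_order_setup(), setup_slub_max_order() and hyp_pool_init().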