Replace the areas parameter with a more generic flags parameter. This
allows for up to 16 allocation areas and 16 allocation flags.

This patch introduces the flags and changes the names of the functions;
subsequent patches will actually wire up the flags to do something.

The first two flags introduced are:
- FLAG_DONTZERO to request that the allocated memory not be zeroed
- FLAG_FRESH to indicate that the allocated memory should not have been
  touched (read from or written to) in any way since boot

This patch also fixes the order of arguments to consistently have the
alignment first and then the size, thereby fixing a bug where the two
values would get swapped.

Fixes: 8131e91a4b61 ("lib/alloc_page: complete rewrite of the page allocator")

Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@xxxxxxxxxx>
---
 lib/alloc_page.h | 39 ++++++++++++++++++++++-----------------
 lib/alloc_page.c | 16 ++++++++--------
 lib/s390x/smp.c  |  2 +-
 3 files changed, 31 insertions(+), 26 deletions(-)

diff --git a/lib/alloc_page.h b/lib/alloc_page.h
index 6fd2ff0..1af1419 100644
--- a/lib/alloc_page.h
+++ b/lib/alloc_page.h
@@ -11,8 +11,13 @@
 #include <stdbool.h>
 #include <asm/memory_areas.h>
 
-#define AREA_ANY -1
-#define AREA_ANY_NUMBER 0xff
+#define AREA_ANY_NUMBER	0xff
+
+#define AREA_ANY	0x00000
+#define AREA_MASK	0x0ffff
+
+#define FLAG_DONTZERO	0x10000
+#define FLAG_FRESH	0x20000
 
 /* Returns true if the page allocator has been initialized */
 bool page_alloc_initialized(void);
@@ -30,39 +35,39 @@ void page_alloc_init_area(u8 n, phys_addr_t base_pfn, phys_addr_t top_pfn);
 void page_alloc_ops_enable(void);
 
 /*
- * Allocate aligned memory from the specified areas.
- * areas is a bitmap of allowed areas
+ * Allocate aligned memory with the specified flags.
+ * flags is a bitmap of allowed areas and flags.
  * alignment must be a power of 2
  */
-void *memalign_pages_area(unsigned int areas, size_t alignment, size_t size);
+void *memalign_pages_flags(size_t alignment, size_t size, unsigned int flags);
 
 /*
- * Allocate aligned memory from any area.
- * Equivalent to memalign_pages_area(AREA_ANY, alignment, size).
+ * Allocate aligned memory from any area and with default flags.
+ * Equivalent to memalign_pages_flags(alignment, size, AREA_ANY).
  */
 static inline void *memalign_pages(size_t alignment, size_t size)
 {
-	return memalign_pages_area(AREA_ANY, alignment, size);
+	return memalign_pages_flags(alignment, size, AREA_ANY);
 }
 
 /*
- * Allocate naturally aligned memory from the specified areas.
- * Equivalent to memalign_pages_area(areas, 1ull << order, 1ull << order).
+ * Allocate 1ull << order naturally aligned pages with the specified flags.
+ * Equivalent to memalign_pages_flags(1ull << order, 1ull << order, flags).
  */
-void *alloc_pages_area(unsigned int areas, unsigned int order);
+void *alloc_pages_flags(unsigned int order, unsigned int flags);
 
 /*
- * Allocate naturally aligned pages from any area; the number of allocated
- * pages is 1 << order.
- * Equivalent to alloc_pages_area(AREA_ANY, order);
+ * Allocate 1ull << order naturally aligned pages from any area and with
+ * default flags.
+ * Equivalent to alloc_pages_flags(order, AREA_ANY);
  */
 static inline void *alloc_pages(unsigned int order)
 {
-	return alloc_pages_area(AREA_ANY, order);
+	return alloc_pages_flags(order, AREA_ANY);
 }
 
 /*
- * Allocate one page from any area.
+ * Allocate one page from any area and with default flags.
  * Equivalent to alloc_pages(0);
  */
 static inline void *alloc_page(void)
@@ -83,7 +88,7 @@ void free_pages(void *mem);
  */
 static inline void free_page(void *mem)
 {
-	return free_pages(mem);
+	free_pages(mem);
 }
 
 /*
diff --git a/lib/alloc_page.c b/lib/alloc_page.c
index d8b2758..47e2981 100644
--- a/lib/alloc_page.c
+++ b/lib/alloc_page.c
@@ -361,16 +361,16 @@ void unreserve_pages(phys_addr_t addr, size_t n)
 	spin_unlock(&lock);
 }
 
-static void *page_memalign_order_area(unsigned area, u8 ord, u8 al)
+static void *page_memalign_order_flags(u8 al, u8 ord, u32 flags)
 {
 	void *res = NULL;
-	int i;
+	int i, area;
 
 	spin_lock(&lock);
-	area &= areas_mask;
+	area = (flags & AREA_MASK) ? flags & areas_mask : areas_mask;
 	for (i = 0; !res && (i < MAX_AREAS); i++)
 		if (area & BIT(i))
-			res = page_memalign_order(areas + i, ord, al);
+			res = page_memalign_order(areas + i, al, ord);
 	spin_unlock(&lock);
 	return res;
 }
@@ -379,23 +379,23 @@ static void *page_memalign_order_area(unsigned area, u8 ord, u8 al)
  * Allocates (1 << order) physically contiguous and naturally aligned pages.
  * Returns NULL if the allocation was not possible.
  */
-void *alloc_pages_area(unsigned int area, unsigned int order)
+void *alloc_pages_flags(unsigned int order, unsigned int flags)
 {
-	return page_memalign_order_area(area, order, order);
+	return page_memalign_order_flags(order, order, flags);
 }
 
 /*
  * Allocates (1 << order) physically contiguous aligned pages.
  * Returns NULL if the allocation was not possible.
  */
-void *memalign_pages_area(unsigned int area, size_t alignment, size_t size)
+void *memalign_pages_flags(size_t alignment, size_t size, unsigned int flags)
 {
 	assert(is_power_of_2(alignment));
 	alignment = get_order(PAGE_ALIGN(alignment) >> PAGE_SHIFT);
 	size = get_order(PAGE_ALIGN(size) >> PAGE_SHIFT);
 	assert(alignment < NLISTS);
 	assert(size < NLISTS);
-	return page_memalign_order_area(area, size, alignment);
+	return page_memalign_order_flags(alignment, size, flags);
 }
 
diff --git a/lib/s390x/smp.c b/lib/s390x/smp.c
index 77d80ca..44b2eb4 100644
--- a/lib/s390x/smp.c
+++ b/lib/s390x/smp.c
@@ -190,7 +190,7 @@ int smp_cpu_setup(uint16_t addr, struct psw psw)
 
 	sigp_retry(cpu->addr, SIGP_INITIAL_CPU_RESET, 0, NULL);
 
-	lc = alloc_pages_area(AREA_DMA31, 1);
+	lc = alloc_pages_flags(1, AREA_DMA31);
 	cpu->lowcore = lc;
 	memset(lc, 0, PAGE_SIZE * 2);
 	sigp_retry(cpu->addr, SIGP_SET_PREFIX, (unsigned long )lc, NULL);
-- 
2.26.2
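
P.S. for reviewers: a minimal sketch of how the reworked interface is
meant to be called, using only the declarations from lib/alloc_page.h
above. The example_allocations() wrapper itself is hypothetical test
code, not part of this patch:

#include <alloc_page.h>

static void example_allocations(void)
{
	/* One naturally aligned page from any area, default flags. */
	void *p = alloc_page();

	/*
	 * Four (1 << 2) physically contiguous, naturally aligned pages
	 * from any area; FLAG_DONTZERO requests that the memory not be
	 * zeroed, although it remains a no-op until a later patch in
	 * this series wires it up.
	 */
	void *q = alloc_pages_flags(2, AREA_ANY | FLAG_DONTZERO);

	/*
	 * 16KiB of memory aligned to 64KiB, from any area; note the
	 * fixed argument order: alignment first, then size.
	 */
	void *r = memalign_pages_flags(1ull << 16, 1ull << 14, AREA_ANY);

	free_page(p);
	free_pages(q);
	free_pages(r);
}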