There are some fixed locations in the vmalloc area that are reserved on
ARM (see iotable_init()) and ARM64 (see map_kernel()), but
pcpu_page_first_chunk() calls vm_area_register_early(), which chooses
VMALLOC_START as the start address of its vmap area. That address can
conflict with the regions reserved above and trigger the BUG_ON() in
vm_area_add_early(). Let's choose the end of the existing address ranges
in vmlist as the start address instead of VMALLOC_START to avoid the
BUG_ON().

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/vmalloc.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d5cd52805149..1e8fe08725b8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2238,11 +2238,17 @@ void __init vm_area_add_early(struct vm_struct *vm)
  */
 void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 {
-	static size_t vm_init_off __initdata;
-	unsigned long addr;
-
-	addr = ALIGN(VMALLOC_START + vm_init_off, align);
-	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+	struct vm_struct *head = vmlist, *curr, *next;
+	unsigned long addr = ALIGN(VMALLOC_START, align);
+
+	while (head != NULL) {
+		next = head->next;
+		curr = head;
+		head = next;
+		addr = ALIGN((unsigned long)curr->addr + curr->size, align);
+		if (next && (unsigned long)next->addr - addr > vm->size)
+			break;
+	}
 
 	vm->addr = (void *)addr;
-- 
2.26.2
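
Note for context: the BUG_ON() referred to above comes from the overlap
check that vm_area_add_early() performs while inserting the new area into
vmlist. A simplified sketch of that check, paraphrased from mm/vmalloc.c
rather than quoted verbatim, looks roughly like this:

	void __init vm_area_add_early(struct vm_struct *vm)
	{
		struct vm_struct *tmp, **p;

		BUG_ON(vmap_initialized);
		for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
			if (tmp->addr >= vm->addr) {
				/* new area must end before the next existing one starts */
				BUG_ON(tmp->addr < vm->addr + vm->size);
				break;
			} else {
				/* existing area must end before the new one starts */
				BUG_ON(tmp->addr + tmp->size > vm->addr);
			}
		}
		vm->next = *p;
		*p = vm;
	}

Unconditionally starting at VMALLOC_START can place the new area on top of
a region that iotable_init()/map_kernel() already added to vmlist, which is
exactly the overlap these BUG_ON()s catch; starting the search from the end
of the existing vmlist ranges sidesteps it.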