On Mon, Aug 09, 2021 at 05:37:48PM +0800, Kefeng Wang wrote:
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index d5cd52805149..1e8fe08725b8 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2238,11 +2238,17 @@ void __init vm_area_add_early(struct vm_struct *vm)
>   */
>  void __init vm_area_register_early(struct vm_struct *vm, size_t align)
>  {
> -	static size_t vm_init_off __initdata;
> -	unsigned long addr;
> -
> -	addr = ALIGN(VMALLOC_START + vm_init_off, align);
> -	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
> +	struct vm_struct *head = vmlist, *curr, *next;
> +	unsigned long addr = ALIGN(VMALLOC_START, align);
> +
> +	while (head != NULL) {

Nitpick: I'd use the same pattern as in vm_area_add_early(), i.e. a
'for' loop. You might as well insert it directly rather than calling
the add function and going through the loop again. Not a strong
preference either way.

> +		next = head->next;
> +		curr = head;
> +		head = next;
> +		addr = ALIGN((unsigned long)curr->addr + curr->size, align);
> +		if (next && (unsigned long)next->addr - addr > vm->size)

Is greater or equal sufficient?

> +			break;
> +	}
>
>  	vm->addr = (void *)addr;

Another nitpick: it's very unlikely on a 64-bit architecture but not
impossible on 32-bit to hit VMALLOC_END here. Maybe some BUG_ON.

-- 
Catalin
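
For illustration only, here is a rough sketch of how the suggestions
above could combine: the 'for' loop walk borrowed from
vm_area_add_early(), a '>=' gap check, direct insertion into vmlist,
and a BUG_ON against overrunning VMALLOC_END. This is an assumed shape
for a respin, not the actual follow-up patch, and it presumes the usual
vmlist ordering by address in mm/vmalloc.c:

void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	unsigned long addr = ALIGN(VMALLOC_START, align);
	struct vm_struct *cur, **p;

	/* Same walk pattern as vm_area_add_early(). */
	for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) {
		/* A gap of exactly vm->size is enough, hence >=. */
		if ((unsigned long)cur->addr - addr >= vm->size)
			break;
		/* No room before this entry, try the gap after it. */
		addr = ALIGN((unsigned long)cur->addr + cur->size, align);
	}

	/* Mostly a 32-bit concern: catch running past the vmalloc area. */
	BUG_ON(addr > VMALLOC_END - vm->size);

	vm->addr = (void *)addr;
	/* Link in directly rather than calling vm_area_add_early(). */
	vm->next = *p;
	*p = vm;
}

Keeping the link pointer 'p' from the walk is what lets the new entry
be spliced in directly, avoiding the second pass over vmlist that a
call to vm_area_add_early() would do.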