> +	if (!page_order) {
> +		area->nr_pages = alloc_pages_bulk_array_node(
> +			gfp_mask, node, nr_small_pages, area->pages);
> +	} else {
> +		/*
> +		 * Careful, we allocate and map page_order pages, but tracking is done
> +		 * per PAGE_SIZE page so as to keep the vm_struct APIs independent of

Comment lines over 80 columns are completely unreadable, so please avoid
them.

> +		 * the physical/mapped size.
> +		 */
> +		while (area->nr_pages < nr_small_pages) {
> +			struct page *page;
> +			int i;
> +
> +			/* Compound pages required for remap_vmalloc_page */
> +			page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
> +			if (unlikely(!page))
> +				break;
>
> +			for (i = 0; i < (1U << page_order); i++)
> +				area->pages[area->nr_pages + i] = page + i;
>
> +			if (gfpflags_allow_blocking(gfp_mask))
> +				cond_resched();
> +
> +			area->nr_pages += 1U << page_order;
> +		}

In fact, splitting this whole high order allocation logic into a little
helper would massively benefit the function by ordering it more logically
and reducing a level of indentation (rough sketch at the end of this mail).

> +	/*
> +	 * If not enough pages were obtained to accomplish an
> +	 * allocation request, free them via __vfree() if any.
> +	 */
> +	if (area->nr_pages != nr_small_pages) {
> +		warn_alloc(gfp_mask, NULL,
> +			"vmalloc size %lu allocation failure: "
> +			"page order %u allocation failed",
> +			area->nr_pages * PAGE_SIZE, page_order);
> +		goto fail;
> +	}

From reading __alloc_pages_bulk, not allocating all pages is something
that can happen fairly easily.  Shouldn't we try to allocate the missing
pages manually and/or retry here?

> +
> +	if (vmap_pages_range(addr, addr + size, prot, area->pages, page_shift) < 0) {

Another pointlessly long line.
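
Something like the sketch below is roughly what I have in mind for the
helper.  To be clear, the name vm_area_alloc_pages and the exact signature
are made up here, and the single-page fallback after the bulk call is just
one possible answer to the partial-allocation question above, not a tested
implementation:

static unsigned int
vm_area_alloc_pages(gfp_t gfp, int nid, unsigned int order,
		    unsigned int nr_pages, struct page **pages)
{
	unsigned int nr_allocated = 0;

	/*
	 * For order-0 requests try the bulk allocator first; whatever it
	 * could not provide is picked up page by page in the loop below.
	 */
	if (!order)
		nr_allocated = alloc_pages_bulk_array_node(gfp, nid,
							   nr_pages, pages);

	while (nr_allocated < nr_pages) {
		struct page *page;
		unsigned int i;

		/* Compound pages required for remap_vmalloc_page */
		page = alloc_pages_node(nid, order ? gfp | __GFP_COMP : gfp,
					order);
		if (unlikely(!page))
			break;

		for (i = 0; i < (1U << order); i++)
			pages[nr_allocated + i] = page + i;

		if (gfpflags_allow_blocking(gfp))
			cond_resched();

		nr_allocated += 1U << order;
	}

	return nr_allocated;
}

The caller then collapses to a single

	area->nr_pages = vm_area_alloc_pages(gfp_mask, node, page_order,
					     nr_small_pages, area->pages);

followed by the existing nr_pages != nr_small_pages check, which also gets
rid of one indentation level in the allocation path.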