We cannot yet safely enable HAVE_ARCH_HUGE_VMAP for all vmalloc on
X86_64. Let bpf_prog_pack call __vmalloc_node_range() with
VM_TRY_HUGE_VMAP directly.

Signed-off-by: Song Liu <song@xxxxxxxxxx>
---
 kernel/bpf/core.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 13e9dbeeedf3..257c6457f256 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -851,13 +851,28 @@ static LIST_HEAD(pack_list);
 #define BPF_HPAGE_MASK PAGE_MASK
 #endif
 
+static void *bpf_prog_pack_vmalloc(unsigned long size)
+{
+#if defined(MODULES_VADDR)
+	unsigned long start = MODULES_VADDR;
+	unsigned long end = MODULES_END;
+#else
+	unsigned long start = VMALLOC_START;
+	unsigned long end = VMALLOC_END;
+#endif
+
+	return __vmalloc_node_range(size, PAGE_SIZE, start, end, GFP_KERNEL, PAGE_KERNEL,
+				    VM_DEFER_KMEMLEAK | VM_TRY_HUGE_VMAP,
+				    NUMA_NO_NODE, __builtin_return_address(0));
+}
+
 static size_t select_bpf_prog_pack_size(void)
 {
 	size_t size;
 	void *ptr;
 
 	size = BPF_HPAGE_SIZE * num_online_nodes();
-	ptr = module_alloc(size);
+	ptr = bpf_prog_pack_vmalloc(size);
 
 	/* Test whether we can get huge pages. If not just use PAGE_SIZE
 	 * packs.
@@ -881,7 +896,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
 		       GFP_KERNEL);
 	if (!pack)
 		return NULL;
-	pack->ptr = module_alloc(bpf_prog_pack_size);
+	pack->ptr = bpf_prog_pack_vmalloc(bpf_prog_pack_size);
 	if (!pack->ptr) {
 		kfree(pack);
 		return NULL;
 	}
@@ -970,7 +985,7 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 	if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
 				       bpf_prog_chunk_count(), 0) == 0) {
 		list_del(&pack->list);
-		module_memfree(pack->ptr);
+		vfree(pack->ptr);
 		kfree(pack);
 	}
 out:
-- 
2.30.2
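
A note on the huge-page probe: the first hunk truncates before the check
in select_bpf_prog_pack_size() that decides between huge and PAGE_SIZE
packs. The sketch below shows the expected shape of that tail, assuming
the existing is_vm_area_hugepages() helper from include/linux/vmalloc.h;
the exact fallback and free lines are not part of this diff, so treat it
as illustrative only:

	/* Sketch: tail of select_bpf_prog_pack_size(). If the probe
	 * allocation failed, or did not come back huge-page backed,
	 * fall back to PAGE_SIZE packs.
	 */
	if (!ptr || !is_vm_area_hugepages(ptr))
		size = PAGE_SIZE;

	vfree(ptr);	/* probe allocation is plain vmalloc memory now */
	return size;

Since bpf_prog_pack_vmalloc() returns vmalloc memory rather than
module_alloc() memory, vfree() (as the last hunk uses for pack->ptr) is
the matching release path.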