Using HPAGE_PMD_SIZE as the size for bpf_prog_pack is not ideal in some
cases. Specifically, on NUMA systems, __vmalloc_node_range() requires a
size of PMD_SIZE * num_online_nodes() to allocate huge pages. Also, if
the system does not support huge pages (e.g., with the cmdline option
nohugevmalloc), it is better to use PAGE_SIZE packs.

Add logic to select a proper size for bpf_prog_pack. This solution is
not ideal, as it makes assumptions about the behavior of module_alloc()
and __vmalloc_node_range(). However, it appears to be the easiest
solution, as it doesn't require changes to module_alloc() or the
vmalloc code.

Signed-off-by: Song Liu <song@xxxxxxxxxx>
---
 kernel/bpf/core.c | 66 +++++++++++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 22 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ab630f773ec1..957b198364eb 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -33,6 +33,7 @@
 #include <linux/extable.h>
 #include <linux/log2.h>
 #include <linux/bpf_verifier.h>
+#include <linux/nodemask.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -814,15 +815,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
  * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
  * to host BPF programs.
  */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define BPF_PROG_PACK_SIZE	HPAGE_PMD_SIZE
-#else
-#define BPF_PROG_PACK_SIZE	PAGE_SIZE
-#endif
 #define BPF_PROG_CHUNK_SHIFT	6
 #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
 #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
-#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
 
 struct bpf_prog_pack {
 	struct list_head list;
@@ -830,30 +825,57 @@ struct bpf_prog_pack {
 	unsigned long bitmap[];
 };
 
-#define BPF_PROG_MAX_PACK_PROG_SIZE	BPF_PROG_PACK_SIZE
 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
 
+static size_t bpf_prog_pack_size = -1;
+
+static inline int bpf_prog_chunk_count(void)
+{
+	WARN_ON_ONCE(bpf_prog_pack_size == -1);
+	return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
+}
+
 static DEFINE_MUTEX(pack_mutex);
 static LIST_HEAD(pack_list);
 
 static struct bpf_prog_pack *alloc_new_pack(void)
 {
 	struct bpf_prog_pack *pack;
+	size_t size;
+	void *ptr;
 
-	pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
-	if (!pack)
+	if (bpf_prog_pack_size == -1) {
+		/* Test whether we can get huge pages. If not, just use
+		 * PAGE_SIZE packs.
+		 */
+		size = PMD_SIZE * num_online_nodes();
+		ptr = module_alloc(size);
+		if (ptr && is_vm_area_hugepages(ptr)) {
+			bpf_prog_pack_size = size;
+			goto got_ptr;
+		} else {
+			bpf_prog_pack_size = PAGE_SIZE;
+			vfree(ptr);
+		}
+	}
+
+	ptr = module_alloc(bpf_prog_pack_size);
+	if (!ptr)
 		return NULL;
-	pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
-	if (!pack->ptr) {
-		kfree(pack);
+got_ptr:
+	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
+		       GFP_KERNEL);
+	if (!pack) {
+		vfree(ptr);
 		return NULL;
 	}
-	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
+
+	pack->ptr = ptr;
+	bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
 	list_add_tail(&pack->list, &pack_list);
 	set_vm_flush_reset_perms(pack->ptr);
-	set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
-	set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
+	set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
 	return pack;
 }
@@ -864,7 +886,7 @@ static void *bpf_prog_pack_alloc(u32 size)
 	unsigned long pos;
 	void *ptr = NULL;
 
-	if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	if (size > bpf_prog_pack_size) {
 		size = round_up(size, PAGE_SIZE);
 		ptr = module_alloc(size);
 		if (ptr) {
@@ -876,9 +898,9 @@
 	}
 	mutex_lock(&pack_mutex);
 	list_for_each_entry(pack, &pack_list, list) {
-		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+		pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
 						 nbits, 0);
-		if (pos < BPF_PROG_CHUNK_COUNT)
+		if (pos < bpf_prog_chunk_count())
 			goto found_free_area;
 	}
@@ -904,12 +926,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 	unsigned long pos;
 	void *pack_ptr;
 
-	if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	if (hdr->size > bpf_prog_pack_size) {
 		module_memfree(hdr);
 		return;
 	}
 
-	pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
+	pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));
 
 	mutex_lock(&pack_mutex);
 	list_for_each_entry(tmp, &pack_list, list) {
@@ -926,8 +948,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 	pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
 	bitmap_clear(pack->bitmap, pos, nbits);
-	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
-				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
+	if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+				       bpf_prog_chunk_count(), 0) == 0) {
 		list_del(&pack->list);
 		module_memfree(pack->ptr);
 		kfree(pack);
 	}
-- 
2.30.2
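
A note on the approach, with a minimal sketch (not part of the patch): the
selection logic in alloc_new_pack() probes once with module_alloc(), caches
the chosen size, and falls back to PAGE_SIZE packs. The same control flow,
modeled as self-contained user-space C; probe_alloc(), HUGE_SIZE, and
MODEL_PAGE_SIZE are hypothetical stand-ins for module_alloc() plus
is_vm_area_hugepages() and for PMD_SIZE * num_online_nodes(), which are
kernel-internal:

#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE	4096UL
#define HUGE_SIZE	(2UL << 20)	/* stand-in for PMD_SIZE * num_online_nodes() */

static size_t pack_size;	/* 0 == not probed yet, like the -1 sentinel */

/* Stand-in for module_alloc() + is_vm_area_hugepages(). Pretend the
 * allocation is not backed by huge pages, as with nohugevmalloc.
 */
static void *probe_alloc(size_t size, int *huge)
{
	*huge = 0;
	return malloc(size);
}

static size_t select_pack_size(void)
{
	void *ptr;
	int huge;

	if (pack_size)		/* probe only once; reuse the cached answer */
		return pack_size;

	ptr = probe_alloc(HUGE_SIZE, &huge);
	if (ptr && huge)
		pack_size = HUGE_SIZE;
	else
		pack_size = MODEL_PAGE_SIZE;	/* fall back to single pages */
	free(ptr);	/* free(NULL) is a no-op if the probe failed outright */
	return pack_size;
}

int main(void)
{
	printf("selected pack size: %zu bytes\n", select_pack_size());
	return 0;
}

One difference from the kernel code: alloc_new_pack() keeps a successful
probe allocation (via the got_ptr label) instead of freeing it, so the
probe is never wasted; the sketch frees unconditionally for simplicity.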