> On Feb 11, 2022, at 6:35 AM, Daniel Borkmann <daniel@xxxxxxxxxxxxx> wrote:
>
> On 2/10/22 5:51 PM, Song Liu wrote:
>>> On Feb 10, 2022, at 12:25 AM, Daniel Borkmann <daniel@xxxxxxxxxxxxx> wrote:
>>> On 2/10/22 7:41 AM, Song Liu wrote:
>>>> bpf_prog_pack uses huge pages to reduce pressure on the instruction TLB.
>>>> To guarantee allocating huge pages for bpf_prog_pack, it is necessary to
>>>> allocate memory of size PMD_SIZE * num_online_nodes().
>>>> On the other hand, if the system doesn't support huge pages, it is more
>>>> efficient to allocate PAGE_SIZE bpf_prog_pack.
>>>> Address the different scenarios with a more flexible bpf_prog_pack_size().
>>>>
>>>> Signed-off-by: Song Liu <song@xxxxxxxxxx>
>>>> ---
>>>>  kernel/bpf/core.c | 47 +++++++++++++++++++++++++++--------------------
>>>>  1 file changed, 27 insertions(+), 20 deletions(-)
>>>>
>>>> diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
>>>> index 42d96549a804..d961a1f07a13 100644
>>>> --- a/kernel/bpf/core.c
>>>> +++ b/kernel/bpf/core.c
>>>> @@ -814,46 +814,53 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
>>>>   * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
>>>>   * to host BPF programs.
>>>>   */
>>>> -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>>> -#define BPF_PROG_PACK_SIZE	HPAGE_PMD_SIZE
>>>> -#else
>>>> -#define BPF_PROG_PACK_SIZE	PAGE_SIZE
>>>> -#endif
>>>>  #define BPF_PROG_CHUNK_SHIFT	6
>>>>  #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
>>>>  #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
>>>> -#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
>>>>
>>>>  struct bpf_prog_pack {
>>>>  	struct list_head list;
>>>>  	void *ptr;
>>>> -	unsigned long bitmap[BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)];
>>>> +	unsigned long bitmap[];
>>>>  };
>>>>
>>>> -#define BPF_PROG_MAX_PACK_PROG_SIZE	BPF_PROG_PACK_SIZE
>>>>  #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
>>>>
>>>>  static DEFINE_MUTEX(pack_mutex);
>>>>  static LIST_HEAD(pack_list);
>>>>
>>>> +static inline int bpf_prog_pack_size(void)
>>>> +{
>>>> +	/* If vmap_allow_huge == true, use pack size of the smallest
>>>> +	 * possible vmalloc huge page: PMD_SIZE * num_online_nodes().
>>>> +	 * Otherwise, use pack size of PAGE_SIZE.
>>>> +	 */
>>>> +	return get_vmap_allow_huge() ? PMD_SIZE * num_online_nodes() : PAGE_SIZE;
>>>> +}
>>>
>>> Imho, this is making too many assumptions about implementation details. Can't we
>>> just add a new module_alloc*() API instead which internally guarantees allocating
>>> huge pages when enabled/supported (e.g. with a __weak function as fallback)?
>>
>> I agree that this is making too many assumptions. But a new module_alloc_huge()
>> may not work, because we need the caller to know the proper size to ask for.
>> (Or maybe I misunderstood your suggestion?)
>>
>> How about we introduce something like
>>
>> /* minimal size to get huge pages from vmalloc. If not possible,
>>  * return 0 (or -1?)
>>  */
>> int vmalloc_hpage_min_size(void)
>> {
>> 	return vmap_allow_huge ? PMD_SIZE * num_online_nodes() : 0;
>> }
>
> And that would live inside mm/vmalloc.c and is exported to users ...

Yeah, this one would go in vmalloc.c.

>
>> /* minimal size to get huge pages from module_alloc */
>> int module_alloc_hpage_min_size(void)
>> {
>> 	return vmalloc_hpage_min_size();
>> }
>
> ... and this one as wrapper in module alloc infra with __weak attr?

And this one would go in some module.c file(s). I am not quite sure whether we need
the __weak attr or not.
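For illustration, the __weak pattern would look roughly like this (untested
sketch following the helpers above; the file placement and the idea of an
arch override are my assumptions, not existing code):

/* kernel/module.c: generic fallback, valid wherever module_alloc() is a
 * plain vmalloc-backed allocation.
 */
int __weak module_alloc_hpage_min_size(void)
{
	return vmalloc_hpage_min_size();
}

/* arch/<arch>/kernel/module.c: an architecture whose module_alloc() cannot
 * be backed by huge pages would override the weak default and return 0,
 * so callers fall back to PAGE_SIZE allocations.
 */
int module_alloc_hpage_min_size(void)
{
	return 0;
}

With the __weak default in place, only architectures that need different
behavior would have to add the override.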
>
>> static inline int bpf_prog_pack_size(void)
>> {
>> 	return module_alloc_hpage_min_size() ? : PAGE_SIZE;
>> }
>
> Could probably work. It's not nice, but at least it's in the corresponding places, so
> bpf is not exposing / hard-coding assumptions about implementation details which
> could potentially break later on.

I don't really like it either. Another way to do this is to probe for the required
bpf_prog_pack size from within the BPF code, something like the following. The
advantage of this version is that it doesn't need any changes in vmalloc or module
code.

Thanks,
Song

diff --git i/kernel/bpf/core.c w/kernel/bpf/core.c
index 44623c9b5bb1..3cfd0f0c93d2 100644
--- i/kernel/bpf/core.c
+++ w/kernel/bpf/core.c
@@ -814,15 +814,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
  * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
  * to host BPF programs.
  */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define BPF_PROG_PACK_SIZE	HPAGE_PMD_SIZE
-#else
-#define BPF_PROG_PACK_SIZE	PAGE_SIZE
-#endif
 #define BPF_PROG_CHUNK_SHIFT	6
 #define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
 #define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
-#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

 struct bpf_prog_pack {
 	struct list_head list;
@@ -830,30 +824,56 @@ struct bpf_prog_pack {
 	unsigned long bitmap[];
 };

-#define BPF_PROG_MAX_PACK_PROG_SIZE	BPF_PROG_PACK_SIZE
 #define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)

+static int bpf_prog_pack_size = -1;
+
+static inline int bpf_prog_chunk_count(void)
+{
+	WARN_ON_ONCE(bpf_prog_pack_size == -1);
+	return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
+}
+
 static DEFINE_MUTEX(pack_mutex);
 static LIST_HEAD(pack_list);

 static struct bpf_prog_pack *alloc_new_pack(void)
 {
 	struct bpf_prog_pack *pack;
+	void *ptr;
+	int size;

-	pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
-	if (!pack)
+	/* Test whether we can get huge pages. If not just use PAGE_SIZE
+	 * packs.
+	 */
+	if (bpf_prog_pack_size == -1) {
+		size = PMD_SIZE * num_online_nodes();
+		ptr = module_alloc(size);
+		if (ptr && is_vm_area_hugepages(ptr)) {
+			bpf_prog_pack_size = size;
+			goto got_ptr;
+		} else {
+			bpf_prog_pack_size = PAGE_SIZE;
+			vfree(ptr);
+		}
+	}
+
+	ptr = module_alloc(bpf_prog_pack_size);
+	if (!ptr)
 		return NULL;

-	pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
-	if (!pack->ptr) {
-		kfree(pack);
+got_ptr:
+	pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(bpf_prog_chunk_count()), GFP_KERNEL);
+	if (!pack) {
+		vfree(ptr);
 		return NULL;
 	}
-	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
+	pack->ptr = ptr;
+	bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
 	list_add_tail(&pack->list, &pack_list);

 	set_vm_flush_reset_perms(pack->ptr);
-	set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
-	set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
+	set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
 	return pack;
 }

@@ -864,7 +884,7 @@ static void *bpf_prog_pack_alloc(u32 size)
 	unsigned long pos;
 	void *ptr = NULL;

-	if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	if (size > bpf_prog_pack_size) {
 		size = round_up(size, PAGE_SIZE);
 		ptr = module_alloc(size);
 		if (ptr) {
@@ -876,9 +896,9 @@ static void *bpf_prog_pack_alloc(u32 size)
 	}
 	mutex_lock(&pack_mutex);
 	list_for_each_entry(pack, &pack_list, list) {
-		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+		pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
 						 nbits, 0);
-		if (pos < BPF_PROG_CHUNK_COUNT)
+		if (pos < bpf_prog_chunk_count())
 			goto found_free_area;
 	}

@@ -904,12 +924,12 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 	unsigned long pos;
 	void *pack_ptr;

-	if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	if (hdr->size > bpf_prog_pack_size) {
 		module_memfree(hdr);
 		return;
 	}

-	pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
+	pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));
 	mutex_lock(&pack_mutex);

 	list_for_each_entry(tmp, &pack_list, list) {
@@ -926,8 +946,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
 	pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
 	bitmap_clear(pack->bitmap, pos, nbits);

-	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
-				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
+	if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+				       bpf_prog_chunk_count(), 0) == 0) {
 		list_del(&pack->list);
 		module_memfree(pack->ptr);
 		kfree(pack);
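For review purposes, the one-time probe can also be read as a standalone helper.
This is just a sketch of the logic in the diff above (the helper name is made up
for illustration, and unlike the diff it frees the probe allocation instead of
reusing it via the got_ptr label):

/* Probe once whether module_alloc() can back a PMD_SIZE * num_online_nodes()
 * area with huge pages; if not, fall back to PAGE_SIZE packs.
 */
static int select_bpf_prog_pack_size(void)
{
	int size = PMD_SIZE * num_online_nodes();
	void *ptr = module_alloc(size);

	/* is_vm_area_hugepages() must only be called on a valid area */
	if (!ptr || !is_vm_area_hugepages(ptr))
		size = PAGE_SIZE;

	vfree(ptr);	/* vfree(NULL) is a no-op */
	return size;
}

Note that the probe has to check for a NULL ptr before calling
is_vm_area_hugepages(); the same check is needed in alloc_new_pack() above.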