From: Alexei Starovoitov <ast@xxxxxxxxxx>

Using the same low/high watermarks for every bucket in bpf_mem_cache
consumes a significant amount of memory. Preallocating 64 elements of
PAGE_SIZE to the free list is not efficient. Make the low/high
watermarks and the batching value depend on the element size. This
change brings significant memory savings.

Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
---
 kernel/bpf/memalloc.c | 64 ++++++++++++++++++++++++++++++-------------
 1 file changed, 45 insertions(+), 19 deletions(-)

diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index a43630371b9f..be8262f5c9ec 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -105,6 +105,7 @@ struct bpf_mem_cache {
 	atomic_t free_cnt_nmi;
 	/* flag to refill nmi list too */
 	bool refill_nmi_list;
+	int low_watermark, high_watermark, batch;
 };
 
 struct bpf_mem_caches {
@@ -123,14 +124,6 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 	return entry;
 }
 
-#define BATCH 48
-#define LOW_WATERMARK 32
-#define HIGH_WATERMARK 96
-/* Assuming the average number of elements per bucket is 64, when all buckets
- * are used the total memory will be: 64*16*32 + 64*32*32 + 64*64*32 + ... +
- * 64*4096*32 ~ 20Mbyte
- */
-
 /* extra macro useful for testing by randomizing in_nmi condition */
 #define bpf_in_nmi() in_nmi()
 
@@ -238,7 +231,7 @@ static void free_bulk(struct bpf_mem_cache *c)
 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
 			local_irq_restore(flags);
 		free_one(c, llnode);
-	} while (cnt > (HIGH_WATERMARK + LOW_WATERMARK) / 2);
+	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 }
 
 static void free_bulk_nmi(struct bpf_mem_cache *c)
@@ -253,7 +246,7 @@ static void free_bulk_nmi(struct bpf_mem_cache *c)
 		else
 			cnt = 0;
 		free_one(c, llnode);
-	} while (cnt > (HIGH_WATERMARK + LOW_WATERMARK) / 2);
+	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 }
 
 static void bpf_mem_refill(struct irq_work *work)
@@ -262,12 +255,12 @@ static void bpf_mem_refill(struct irq_work *work)
 	int cnt;
 
 	cnt = c->free_cnt;
-	if (cnt < LOW_WATERMARK)
+	if (cnt < c->low_watermark)
 		/* irq_work runs on this cpu and kmalloc will allocate
 		 * from the current numa node which is what we want here.
 		 */
-		alloc_bulk(c, BATCH, NUMA_NO_NODE);
-	else if (cnt > HIGH_WATERMARK)
+		alloc_bulk(c, c->batch, NUMA_NO_NODE);
+	else if (cnt > c->high_watermark)
 		free_bulk(c);
 
 	if (!c->refill_nmi_list)
@@ -276,9 +269,9 @@ static void bpf_mem_refill(struct irq_work *work)
 		 */
 		return;
 	cnt = atomic_read(&c->free_cnt_nmi);
-	if (cnt < LOW_WATERMARK)
-		alloc_bulk_nmi(c, BATCH, NUMA_NO_NODE);
-	else if (cnt > HIGH_WATERMARK)
+	if (cnt < c->low_watermark)
+		alloc_bulk_nmi(c, c->batch, NUMA_NO_NODE);
+	else if (cnt > c->high_watermark)
 		free_bulk_nmi(c);
 	c->refill_nmi_list = false;
 }
@@ -294,14 +287,47 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c, bool in_nmi)
 	irq_work_queue(&c->refill_work);
 }
 
+/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
+ * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
+ *
+ * For bpf programs that don't have statically known allocation sizes,
+ * assuming (low_mark + high_mark) / 2 as the average number of elements per
+ * bucket and that all buckets are used, the total amount of memory in
+ * freelists on each cpu will be:
+ * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
+ * + nmi's reserves
+ * 1*16 + 1*32 + 1*64 + 1*96 + 1*128 + 1*192 + 1*256 + 1*512 + 1*1024 + 1*2048 + 1*4096
+ * == ~ 122 Kbyte using the heuristic below.
+ * In the unlikely worst case where bpf progs use all allocation sizes from
+ * both non-NMI and NMI context: ~ 227 Kbyte per cpu.
+ * An initialized but unused bpf allocator (not a bpf map specific one) will
+ * consume ~ 19 Kbyte per cpu.
+ * The typical case will be between 19K and 122K, closer to 19K.
+ * bpf progs can and should share bpf_mem_cache when possible.
+ */
+
 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 {
 	init_irq_work(&c->refill_work, bpf_mem_refill);
+	if (c->unit_size <= 256) {
+		c->low_watermark = 32;
+		c->high_watermark = 96;
+	} else {
+		/* When page_size == 4k, order-0 cache will have low_mark == 2
+		 * and high_mark == 6 with batch alloc of 3 individual pages at
+		 * a time.
+		 * For 8k allocs and above: low == 1, high == 3, batch == 1.
+		 */
+		c->low_watermark = max(32 * 256 / c->unit_size, 1);
+		c->high_watermark = max(96 * 256 / c->unit_size, 3);
+	}
+	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
+
 	/* To avoid consuming memory assume that 1st run of bpf
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
 	 */
-	alloc_bulk(c, c->unit_size < 256 ? 4 : 1, cpu_to_node(cpu));
+	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
 
 	/* NMI progs are rare. Assume they have one map_update
 	 * per prog at the very beginning.
@@ -442,7 +468,7 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)
 	}
 	WARN_ON(cnt < 0);
 
-	if (cnt < LOW_WATERMARK)
+	if (cnt < c->low_watermark)
 		irq_work_raise(c, in_nmi);
 	return llnode;
 }
@@ -471,7 +497,7 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 	}
 	WARN_ON(cnt <= 0);
 
-	if (cnt > HIGH_WATERMARK)
+	if (cnt > c->high_watermark)
 		/* free few objects from current cpu into global kmalloc pool */
 		irq_work_raise(c, in_nmi);
 }
-- 
2.30.2
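
For reference, the per-bucket watermark heuristic above can be reproduced
with a small standalone user-space sketch (not part of the patch). The
bucket size list mirrors the sizes[] array used by bpf_mem_alloc; the max()
macro and the printed memory estimate are illustration only and do not
include the NMI reserves:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* bucket sizes used by the bpf_mem_alloc caches */
	static const int sizes[] = {16, 32, 64, 96, 128, 192, 256,
				    512, 1024, 2048, 4096};
	long total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int unit_size = sizes[i];
		int low, high, batch;

		/* same per-bucket heuristic as prefill_mem_cache() above */
		if (unit_size <= 256) {
			low = 32;
			high = 96;
		} else {
			low = max(32 * 256 / unit_size, 1);
			high = max(96 * 256 / unit_size, 3);
		}
		batch = max((high - low) / 4 * 3, 1);
		printf("size %4d: low %3d high %3d batch %3d\n",
		       unit_size, low, high, batch);
		/* (low + high) / 2 is the average fill level assumed in
		 * the memory estimate comment; NMI reserves not counted.
		 */
		total += (long)(low + high) / 2 * unit_size;
	}
	printf("approx. non-NMI freelist footprint per cpu: ~%ld Kbyte\n",
	       total / 1024);
	return 0;
}

For the 4096-byte bucket this prints low 2, high 6, batch 3, matching the
order-0 case described in the prefill_mem_cache() comment.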