On Mon, Jul 27, 2020 at 12:21 PM Roman Gushchin <guro@xxxxxx> wrote:
>
> Do not use rlimit-based memory accounting for hashtab maps.
> It has been replaced with the memcg-based memory accounting.
>
> Signed-off-by: Roman Gushchin <guro@xxxxxx>

Acked-by: Song Liu <songliubraving@xxxxxx>

> ---
>  kernel/bpf/hashtab.c | 19 +------------------
>  1 file changed, 1 insertion(+), 18 deletions(-)
>
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 9d0432170812..9372b559b4e7 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -422,7 +422,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
>  	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
>  	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
>  	struct bpf_htab *htab;
> -	u64 cost;
>  	int err;
>
>  	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
> @@ -459,26 +458,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
>  	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
>  		goto free_htab;
>
> -	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
> -	       (u64) htab->elem_size * htab->map.max_entries;
> -
> -	if (percpu)
> -		cost += (u64) round_up(htab->map.value_size, 8) *
> -			num_possible_cpus() * htab->map.max_entries;
> -	else
> -		cost += (u64) htab->elem_size * num_possible_cpus();
> -
> -	/* if map size is larger than memlock limit, reject it */
> -	err = bpf_map_charge_init(&htab->map.memory, cost);
> -	if (err)
> -		goto free_htab;
> -
>  	err = -ENOMEM;
>  	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
>  					   sizeof(struct bucket),
>  					   htab->map.numa_node);
>  	if (!htab->buckets)
> -		goto free_charge;
> +		goto free_htab;
>
>  	if (htab->map.map_flags & BPF_F_ZERO_SEED)
>  		htab->hashrnd = 0;
> @@ -508,8 +493,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
>  	prealloc_destroy(htab);
>  free_buckets:
>  	bpf_map_area_free(htab->buckets);
> -free_charge:
> -	bpf_map_charge_finish(&htab->map.memory);
>  free_htab:
>  	kfree(htab);
>  	return ERR_PTR(err);
> --
> 2.26.2
>
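
A minimal before/after sketch of the accounting change, using only the
identifiers visible in the hunks above (illustration of the two schemes,
not code added by this patch):

	/* Before (rlimit-based): the worst-case map size was computed
	 * up front and charged against the memlock rlimit; map creation
	 * failed with the error from bpf_map_charge_init() if the
	 * charge did not fit.
	 */
	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
	       (u64) htab->elem_size * htab->map.max_entries;
	err = bpf_map_charge_init(&htab->map.memory, cost);
	if (err)
		goto free_htab;

	/* After (memcg-based): no pre-charge. Allocating with
	 * __GFP_ACCOUNT makes the allocator charge the memory to the
	 * allocating task's memory cgroup, and the charge is dropped
	 * when the memory is freed.
	 */
	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);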