From: Cong Wang <cong.wang@xxxxxxxxxxxxx>

The second parameter of bpf_map_free_id() is always true, so it can
simply be eliminated. With the parameter gone, __bpf_map_put() no
longer serves any purpose and is folded into bpf_map_put().

Cc: Alexei Starovoitov <ast@xxxxxxxxxx>
Cc: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Cc: John Fastabend <john.fastabend@xxxxxxxxx>
Signed-off-by: Cong Wang <cong.wang@xxxxxxxxxxxxx>
---
 include/linux/bpf.h  |  2 +-
 kernel/bpf/offload.c |  2 +-
 kernel/bpf/syscall.c | 23 +++++------------------
 3 files changed, 7 insertions(+), 20 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index ae7771c7d750..3558c192871c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1833,7 +1833,7 @@ struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
 
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
 				  u32 offset, enum btf_field_type type);
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 13e4efc971e6..ae6d5a5c0798 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -412,7 +412,7 @@ static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
 {
 	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
 	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
-	bpf_map_free_id(&offmap->map, true);
+	bpf_map_free_id(&offmap->map);
 	list_del_init(&offmap->offloads);
 	offmap->netdev = NULL;
 }
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 35ffd808f281..1eaa1a18aef7 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,7 +390,7 @@ static int bpf_map_alloc_id(struct bpf_map *map)
 	return id > 0 ? 0 : id;
 }
 
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_free_id(struct bpf_map *map)
 {
 	unsigned long flags;
 
@@ -402,18 +402,10 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 	if (!map->id)
 		return;
 
-	if (do_idr_lock)
-		spin_lock_irqsave(&map_idr_lock, flags);
-	else
-		__acquire(&map_idr_lock);
-
+	spin_lock_irqsave(&map_idr_lock, flags);
 	idr_remove(&map_idr, map->id);
 	map->id = 0;
-
-	if (do_idr_lock)
-		spin_unlock_irqrestore(&map_idr_lock, flags);
-	else
-		__release(&map_idr_lock);
+	spin_unlock_irqrestore(&map_idr_lock, flags);
 }
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -708,11 +700,11 @@ static void bpf_map_put_uref(struct bpf_map *map)
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (unrelying map implementation ops->map_free() might sleep)
  */
-static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+void bpf_map_put(struct bpf_map *map)
 {
 	if (atomic64_dec_and_test(&map->refcnt)) {
 		/* bpf_map_free_id() must be called first */
-		bpf_map_free_id(map, do_idr_lock);
+		bpf_map_free_id(map);
 		btf_put(map->btf);
 		INIT_WORK(&map->work, bpf_map_free_deferred);
 		/* Avoid spawning kworkers, since they all might contend
@@ -721,11 +713,6 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
 		queue_work(system_unbound_wq, &map->work);
 	}
 }
-
-void bpf_map_put(struct bpf_map *map)
-{
-	__bpf_map_put(map, true);
-}
 EXPORT_SYMBOL_GPL(bpf_map_put);
 
 void bpf_map_put_with_uref(struct bpf_map *map)
-- 
2.34.1