Introduce a new map flag, BPF_F_SELECTABLE_MEMCG, for map creation. This flag is supported by all bpf maps. This is preparation for a followup patch. Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx> --- include/uapi/linux/bpf.h | 3 +++ kernel/bpf/arraymap.c | 2 +- kernel/bpf/bloom_filter.c | 4 ++-- kernel/bpf/bpf_local_storage.c | 3 ++- kernel/bpf/bpf_struct_ops.c | 5 ++++- kernel/bpf/devmap.c | 4 ++-- kernel/bpf/hashtab.c | 2 +- kernel/bpf/local_storage.c | 2 +- kernel/bpf/queue_stack_maps.c | 2 +- kernel/bpf/ringbuf.c | 2 +- kernel/bpf/stackmap.c | 2 +- net/core/sock_map.c | 4 ++-- tools/include/uapi/linux/bpf.h | 3 +++ 13 files changed, 24 insertions(+), 14 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 59a217ca2dfd..d5fc1ea70b59 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1227,6 +1227,9 @@ enum { /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), + +/* Selectable memcg */ + BPF_F_SELECTABLE_MEMCG = (1U << 13), }; /* Flags for BPF_PROG_QUERY. 
*/ diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 883905c6c845..eb8deac529ac 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -17,7 +17,7 @@ #define ARRAY_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \ - BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP) + BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP | BPF_F_SELECTABLE_MEMCG) static void bpf_array_free_percpu(struct bpf_array *array) { diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c index 9fe3c6774c40..3714aebc9ed6 100644 --- a/kernel/bpf/bloom_filter.c +++ b/kernel/bpf/bloom_filter.c @@ -9,8 +9,8 @@ #include <linux/random.h> #include <linux/btf_ids.h> -#define BLOOM_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK) +#define BLOOM_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | \ + BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK | BPF_F_SELECTABLE_MEMCG) struct bpf_bloom_filter { struct bpf_map map; diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index e12891dcf2a9..7798235f311e 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -15,7 +15,8 @@ #include <linux/rcupdate_trace.h> #include <linux/rcupdate_wait.h> -#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE) +#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK \ + (BPF_F_NO_PREALLOC | BPF_F_CLONE | BPF_F_SELECTABLE_MEMCG) static struct bpf_local_storage_map_bucket * select_bucket(struct bpf_local_storage_map *smap, diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 51b7ce9902a8..208d593e6a44 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -12,6 +12,8 @@ #include <linux/mutex.h> #include <linux/btf_ids.h> +#define STRUCT_OPS_CREATE_FLAG_MASK (BPF_F_SELECTABLE_MEMCG) + enum bpf_struct_ops_state { BPF_STRUCT_OPS_STATE_INIT, BPF_STRUCT_OPS_STATE_INUSE, @@ -586,7 +588,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) static int 
bpf_struct_ops_map_alloc_check(union bpf_attr *attr) { if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || - attr->map_flags || !attr->btf_vmlinux_value_type_id) + (attr->map_flags & ~STRUCT_OPS_CREATE_FLAG_MASK) || + !attr->btf_vmlinux_value_type_id) return -EINVAL; return 0; } diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 11c7b8411b03..52858963373c 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -50,8 +50,8 @@ #include <trace/events/xdp.h> #include <linux/btf_ids.h> -#define DEV_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) +#define DEV_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | \ + BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_SELECTABLE_MEMCG) struct xdp_dev_bulk_queue { struct xdp_frame *q[DEV_MAP_BULK_SIZE]; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 3cb9486eb313..532c8ee89d58 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -17,7 +17,7 @@ #define HTAB_CREATE_FLAG_MASK \ (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \ - BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED) + BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED | BPF_F_SELECTABLE_MEMCG) #define BATCH_OPS(_name) \ .map_lookup_batch = \ diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index b2bd031aba79..009d6f43a099 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -16,7 +16,7 @@ #include "../cgroup/cgroup-internal.h" #define LOCAL_STORAGE_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) + (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_SELECTABLE_MEMCG) struct bpf_cgroup_storage_map { struct bpf_map map; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 9425df0695ac..c4aab43198ad 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -12,7 +12,7 @@ #include "percpu_freelist.h" #define QUEUE_STACK_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) + (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | 
BPF_F_SELECTABLE_MEMCG) struct bpf_queue_stack { struct bpf_map map; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 3be472fd55da..53a7eb8db257 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -12,7 +12,7 @@ #include <uapi/linux/btf.h> #include <linux/btf_ids.h> -#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE) +#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | BPF_F_SELECTABLE_MEMCG) /* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */ #define RINGBUF_PGOFF \ diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index c952c7547279..007b10d2da7d 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -14,7 +14,7 @@ #define STACK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \ - BPF_F_STACK_BUILD_ID) + BPF_F_STACK_BUILD_ID | BPF_F_SELECTABLE_MEMCG) struct stack_map_bucket { struct pcpu_freelist_node fnode; diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4d9b730aa27f..0310b00e19b8 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -21,8 +21,8 @@ struct bpf_stab { raw_spinlock_t lock; }; -#define SOCK_CREATE_FLAG_MASK \ - (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) +#define SOCK_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | \ + BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_SELECTABLE_MEMCG) static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 59a217ca2dfd..d5fc1ea70b59 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1227,6 +1227,9 @@ enum { /* Create a map that is suitable to be an inner map with dynamic max entries */ BPF_F_INNER_MAP = (1U << 12), + +/* Selectable memcg */ + BPF_F_SELECTABLE_MEMCG = (1U << 13), }; /* Flags for BPF_PROG_QUERY. */ -- 2.17.1