[PATCH v1 bpf-next 1/2] bpf: Support BPF_F_MMAPABLE task_local storage

This patch modifies the generic bpf_local_storage infrastructure to
support mmapable map values and adds mmap() handling to task_local
storage, leveraging this new functionality. A userspace task that
mmaps a task_local storage map will receive a pointer to the map_value
corresponding to that task's key - mmap'ing other tasks' mapvals is not
supported by this patch.
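
As an illustration only - the helper name and value struct below are
assumptions, not part of this patch - a task holding an fd for such a
map (BPF_MAP_TYPE_TASK_STORAGE created with BPF_F_NO_PREALLOC |
BPF_F_MMAPABLE) might map its own value roughly like so:

  #include <stddef.h>
  #include <linux/types.h>
  #include <sys/mman.h>

  struct counters { __u64 events; }; /* must match the map's value type */

  /* Hypothetical helper: mmap the calling task's own map_value. The
   * kernel backs it with PAGE_ALIGN(value_size) bytes and creates the
   * value on demand if this task doesn't have one yet.
   */
  static struct counters *map_my_task_value(int map_fd)
  {
          void *p = mmap(NULL, sizeof(struct counters),
                         PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);

          return p == MAP_FAILED ? NULL : p;
  }

The mapping length must be at least the map's value_size and the offset
must be 0; as noted above, only the calling task's own mapval can be
mapped this way.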

Currently, struct bpf_local_storage_elem contains bookkeeping
information as well as a struct bpf_local_storage_data, which itself
holds additional bookkeeping information and the actual mapval data. We
can't simply map the page containing this struct into userspace.
Instead, mmapable local_storage uses bpf_local_storage_data's data
field to point to the actual mapval, which is allocated separately such
that it can be mmap'd. Only the mapval lives on the page(s) allocated
for it.

The lifetime of the actual_data mmapable region is tied to the
bpf_local_storage_elem that points to it. This doesn't necessarily mean
that the pages go away when the bpf_local_storage_elem is free'd: if
they're mapped into some userspace process, they remain until unmapped,
but they are no longer the task_local storage's mapval.

Implementation details:

  * A few small helpers are added to deal with bpf_local_storage_data's
    'data' field having different semantics when the local_storage map
    is mmapable. With their help, many of the changes to existing code
    are purely mechanical (e.g. sdata->data becomes sdata_mapval(sdata),
    smap->elem_size becomes selem_bytes_used(smap)).

  * The map flags are copied into bpf_local_storage_data when its
    containing bpf_local_storage_elem is alloc'd, since the
    bpf_local_storage_map associated with it may be gone by the time
    bpf_local_storage_data is free'd, and the flags must be checked for
    BPF_F_MMAPABLE when free'ing to ensure that the mmapable region is
    also free'd.
    * The extra field doesn't change bpf_local_storage_elem's size.
      There were 48 bytes of padding after the bpf_local_storage_data
      field, now there are 40.

  * Currently, bpf_local_storage_update always creates a new
    bpf_local_storage_elem for the 'updated' value - the only exception
    being if the map_value has a bpf_spin_lock field, in which case the
    spin lock is grabbed instead of the less granular bpf_local_storage
    lock, and the value is updated in place. This in-place update
    behavior is desired for mmapable local_storage map_values as well,
    since creating a new selem would result in new mmapable pages (see
    the BPF-side sketch after this list).

  * The size of the mmapable pages is accounted for when calling
    mem_{charge,uncharge}. If the pages are mmap'd into a userspace
    task, mem_uncharge may be called before they actually go away.
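
For completeness, here is a minimal BPF-side sketch of how such a map
might be declared and updated; the map name, value struct and attach
point are illustrative assumptions, not part of this patch:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  struct counters {
          __u64 events;
  };

  struct {
          __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
          __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_MMAPABLE);
          __type(key, int);
          __type(value, struct counters);
  } task_ctrs SEC(".maps");

  SEC("tp_btf/sched_process_exec")
  int BPF_PROG(on_exec)
  {
          struct counters *c;

          /* With BPF_F_MMAPABLE the update lands in the same
           * separately-alloc'd pages a userspace task may have mmap'd,
           * rather than in a freshly created selem.
           */
          c = bpf_task_storage_get(&task_ctrs, bpf_get_current_task_btf(),
                                   NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
          if (c)
                  c->events++;
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";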

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
 include/linux/bpf_local_storage.h |  14 ++-
 kernel/bpf/bpf_local_storage.c    | 145 ++++++++++++++++++++++++------
 kernel/bpf/bpf_task_storage.c     |  35 ++++++--
 kernel/bpf/syscall.c              |   2 +-
 4 files changed, 163 insertions(+), 33 deletions(-)

diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 173ec7f43ed1..114973f925ea 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -69,7 +69,17 @@ struct bpf_local_storage_data {
 	 * the number of cachelines accessed during the cache hit case.
 	 */
 	struct bpf_local_storage_map __rcu *smap;
-	u8 data[] __aligned(8);
+	/* Need to duplicate smap's map_flags as smap may be gone when
+	 * it's time to free bpf_local_storage_data
+	 */
+	u64 smap_map_flags;
+	/* If BPF_F_MMAPABLE, this is a void * to separately-alloc'd data
+	 * Otherwise the actual mapval data lives here
+	 */
+	union {
+		DECLARE_FLEX_ARRAY(u8, data) __aligned(8);
+		void *actual_data __aligned(8);
+	};
 };
 
 /* Linked to bpf_local_storage and bpf_local_storage_map */
@@ -124,6 +134,8 @@ static struct bpf_local_storage_cache name = {			\
 /* Helper functions for bpf_local_storage */
 int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
 
+void *sdata_mapval(struct bpf_local_storage_data *data);
+
 struct bpf_map *
 bpf_local_storage_map_alloc(union bpf_attr *attr,
 			    struct bpf_local_storage_cache *cache,
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 146824cc9689..9b3becbcc1a3 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -15,7 +15,8 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
 
-#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK \
+	(BPF_F_NO_PREALLOC | BPF_F_CLONE | BPF_F_MMAPABLE)
 
 static struct bpf_local_storage_map_bucket *
 select_bucket(struct bpf_local_storage_map *smap,
@@ -24,6 +25,51 @@ select_bucket(struct bpf_local_storage_map *smap,
 	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
 }
 
+struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map);
+
+void *alloc_mmapable_selem_value(struct bpf_local_storage_map *smap)
+{
+	struct mem_cgroup *memcg, *old_memcg;
+	void *ptr;
+
+	memcg = bpf_map_get_memcg(&smap->map);
+	old_memcg = set_active_memcg(memcg);
+	ptr = bpf_map_area_mmapable_alloc(PAGE_ALIGN(smap->map.value_size),
+					  NUMA_NO_NODE);
+	set_active_memcg(old_memcg);
+	mem_cgroup_put(memcg);
+
+	return ptr;
+}
+
+void *sdata_mapval(struct bpf_local_storage_data *data)
+{
+	if (data->smap_map_flags & BPF_F_MMAPABLE)
+		return data->actual_data;
+	return &data->data;
+}
+
+static size_t sdata_data_field_size(struct bpf_local_storage_map *smap,
+				    struct bpf_local_storage_data *data)
+{
+	if (smap->map.map_flags & BPF_F_MMAPABLE)
+		return sizeof(void *);
+	return (size_t)smap->map.value_size;
+}
+
+static u32 selem_bytes_used(struct bpf_local_storage_map *smap)
+{
+	if (smap->map.map_flags & BPF_F_MMAPABLE)
+		return smap->elem_size + PAGE_ALIGN(smap->map.value_size);
+	return smap->elem_size;
+}
+
+static bool can_update_existing_selem(struct bpf_local_storage_map *smap,
+				      u64 flags)
+{
+	return flags & BPF_F_LOCK || smap->map.map_flags & BPF_F_MMAPABLE;
+}
+
 static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
 {
 	struct bpf_map *map = &smap->map;
@@ -76,10 +122,19 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 		void *value, bool charge_mem, gfp_t gfp_flags)
 {
 	struct bpf_local_storage_elem *selem;
+	void *mmapable_value = NULL;
+	u32 selem_mem;
 
-	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
+	selem_mem = selem_bytes_used(smap);
+	if (charge_mem && mem_charge(smap, owner, selem_mem))
 		return NULL;
 
+	if (smap->map.map_flags & BPF_F_MMAPABLE) {
+		mmapable_value = alloc_mmapable_selem_value(smap);
+		if (!mmapable_value)
+			goto err_out;
+	}
+
 	if (smap->bpf_ma) {
 		migrate_disable();
 		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
@@ -92,22 +147,28 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
 			 * only does bpf_mem_cache_free when there is
 			 * no other bpf prog is using the selem.
 			 */
-			memset(SDATA(selem)->data, 0, smap->map.value_size);
+			memset(SDATA(selem)->data, 0,
+			       sdata_data_field_size(smap, SDATA(selem)));
 	} else {
 		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
 					gfp_flags | __GFP_NOWARN);
 	}
 
-	if (selem) {
-		if (value)
-			copy_map_value(&smap->map, SDATA(selem)->data, value);
-		/* No need to call check_and_init_map_value as memory is zero init */
-		return selem;
-	}
-
+	if (!selem)
+		goto err_out;
+
+	selem->sdata.smap_map_flags = smap->map.map_flags;
+	if (smap->map.map_flags & BPF_F_MMAPABLE)
+		selem->sdata.actual_data = mmapable_value;
+	if (value)
+		copy_map_value(&smap->map, sdata_mapval(SDATA(selem)), value);
+	/* No need to call check_and_init_map_value as memory is zero init */
+	return selem;
+err_out:
+	if (mmapable_value)
+		bpf_map_area_free(mmapable_value);
 	if (charge_mem)
-		mem_uncharge(smap, owner, smap->elem_size);
-
+		mem_uncharge(smap, owner, selem_mem);
 	return NULL;
 }
 
@@ -184,6 +245,21 @@ static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
 	}
 }
 
+static void __bpf_selem_kfree(struct bpf_local_storage_elem *selem)
+{
+	if (selem->sdata.smap_map_flags & BPF_F_MMAPABLE)
+		bpf_map_area_free(selem->sdata.actual_data);
+	kfree(selem);
+}
+
+static void __bpf_selem_kfree_rcu(struct rcu_head *rcu)
+{
+	struct bpf_local_storage_elem *selem;
+
+	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
+	__bpf_selem_kfree(selem);
+}
+
 /* rcu tasks trace callback for bpf_ma == false */
 static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
 {
@@ -191,9 +267,9 @@ static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
 
 	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
 	if (rcu_trace_implies_rcu_gp())
-		kfree(selem);
+		__bpf_selem_kfree(selem);
 	else
-		kfree_rcu(selem, rcu);
+		call_rcu(rcu, __bpf_selem_kfree_rcu);
 }
 
 /* Handle bpf_ma == false */
@@ -201,7 +277,7 @@ static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
 			     bool vanilla_rcu)
 {
 	if (vanilla_rcu)
-		kfree_rcu(selem, rcu);
+		call_rcu(&selem->rcu, __bpf_selem_kfree_rcu);
 	else
 		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
 }
@@ -209,8 +285,12 @@ static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
 static void bpf_selem_free_rcu(struct rcu_head *rcu)
 {
 	struct bpf_local_storage_elem *selem;
+	struct bpf_local_storage_map *smap;
 
 	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
+	smap = selem->sdata.smap;
+	if (selem->sdata.smap_map_flags & BPF_F_MMAPABLE)
+		bpf_map_area_free(selem->sdata.actual_data);
 	bpf_mem_cache_raw_free(selem);
 }
 
@@ -241,6 +321,8 @@ void bpf_selem_free(struct bpf_local_storage_elem *selem,
 		 * immediately.
 		 */
 		migrate_disable();
+		if (smap->map.map_flags & BPF_F_MMAPABLE)
+			bpf_map_area_free(selem->sdata.actual_data);
 		bpf_mem_cache_free(&smap->selem_ma, selem);
 		migrate_enable();
 	}
@@ -266,7 +348,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
 	 * from local_storage.
 	 */
 	if (uncharge_mem)
-		mem_uncharge(smap, owner, smap->elem_size);
+		mem_uncharge(smap, owner, selem_bytes_used(smap));
 
 	free_local_storage = hlist_is_singular_node(&selem->snode,
 						    &local_storage->list);
@@ -583,14 +665,14 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
 		if (err) {
 			bpf_selem_free(selem, smap, true);
-			mem_uncharge(smap, owner, smap->elem_size);
+			mem_uncharge(smap, owner, selem_bytes_used(smap));
 			return ERR_PTR(err);
 		}
 
 		return SDATA(selem);
 	}
 
-	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
+	if (can_update_existing_selem(smap, map_flags) && !(map_flags & BPF_NOEXIST)) {
 		/* Hoping to find an old_sdata to do inline update
 		 * such that it can avoid taking the local_storage->lock
 		 * and changing the lists.
@@ -601,8 +683,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 		if (err)
 			return ERR_PTR(err);
 		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
-			copy_map_value_locked(&smap->map, old_sdata->data,
-					      value, false);
+			if (map_flags & BPF_F_LOCK)
+				copy_map_value_locked(&smap->map,
+						      sdata_mapval(old_sdata),
+						      value, false);
+			else
+				copy_map_value(&smap->map, sdata_mapval(old_sdata),
+					       value);
 			return old_sdata;
 		}
 	}
@@ -633,8 +720,8 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 		goto unlock;
 
 	if (old_sdata && (map_flags & BPF_F_LOCK)) {
-		copy_map_value_locked(&smap->map, old_sdata->data, value,
-				      false);
+		copy_map_value_locked(&smap->map, sdata_mapval(old_sdata),
+				      value, false);
 		selem = SELEM(old_sdata);
 		goto unlock;
 	}
@@ -656,7 +743,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 unlock:
 	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
 	if (alloc_selem) {
-		mem_uncharge(smap, owner, smap->elem_size);
+		mem_uncharge(smap, owner, selem_bytes_used(smap));
 		bpf_selem_free(alloc_selem, smap, true);
 	}
 	return err ? ERR_PTR(err) : SDATA(selem);
@@ -707,6 +794,10 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
 	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
 		return -E2BIG;
 
+	if ((attr->map_flags & BPF_F_MMAPABLE) &&
+	    attr->map_type != BPF_MAP_TYPE_TASK_STORAGE)
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -820,8 +911,12 @@ bpf_local_storage_map_alloc(union bpf_attr *attr,
 		raw_spin_lock_init(&smap->buckets[i].lock);
 	}
 
-	smap->elem_size = offsetof(struct bpf_local_storage_elem,
-				   sdata.data[attr->value_size]);
+	if (attr->map_flags & BPF_F_MMAPABLE)
+		smap->elem_size = offsetof(struct bpf_local_storage_elem,
+					   sdata.data[sizeof(void *)]);
+	else
+		smap->elem_size = offsetof(struct bpf_local_storage_elem,
+					   sdata.data[attr->value_size]);
 
 	smap->bpf_ma = bpf_ma;
 	if (bpf_ma) {
diff --git a/kernel/bpf/bpf_task_storage.c b/kernel/bpf/bpf_task_storage.c
index adf6dfe0ba68..ce75c8d8b2ce 100644
--- a/kernel/bpf/bpf_task_storage.c
+++ b/kernel/bpf/bpf_task_storage.c
@@ -90,6 +90,7 @@ void bpf_task_storage_free(struct task_struct *task)
 static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_local_storage_data *sdata;
+	struct bpf_local_storage_map *smap;
 	struct task_struct *task;
 	unsigned int f_flags;
 	struct pid *pid;
@@ -114,7 +115,8 @@ static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
 	sdata = task_storage_lookup(task, map, true);
 	bpf_task_storage_unlock();
 	put_pid(pid);
-	return sdata ? sdata->data : NULL;
+	smap = (struct bpf_local_storage_map *)map;
+	return sdata ? sdata_mapval(sdata) : NULL;
 out:
 	put_pid(pid);
 	return ERR_PTR(err);
@@ -209,18 +211,19 @@ static void *__bpf_task_storage_get(struct bpf_map *map,
 				    u64 flags, gfp_t gfp_flags, bool nobusy)
 {
 	struct bpf_local_storage_data *sdata;
+	struct bpf_local_storage_map *smap;
 
+	smap = (struct bpf_local_storage_map *)map;
 	sdata = task_storage_lookup(task, map, nobusy);
 	if (sdata)
-		return sdata->data;
+		return sdata_mapval(sdata);
 
 	/* only allocate new storage, when the task is refcounted */
 	if (refcount_read(&task->usage) &&
 	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
-		sdata = bpf_local_storage_update(
-			task, (struct bpf_local_storage_map *)map, value,
-			BPF_NOEXIST, gfp_flags);
-		return IS_ERR(sdata) ? NULL : sdata->data;
+		sdata = bpf_local_storage_update(task, smap, value,
+						 BPF_NOEXIST, gfp_flags);
+		return IS_ERR(sdata) ? NULL : sdata_mapval(sdata);
 	}
 
 	return NULL;
@@ -317,6 +320,25 @@ static void task_storage_map_free(struct bpf_map *map)
 	bpf_local_storage_map_free(map, &task_cache, &bpf_task_storage_busy);
 }
 
+static int task_storage_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+{
+	void *data;
+
+	if (!(map->map_flags & BPF_F_MMAPABLE) || vma->vm_pgoff ||
+	    (vma->vm_end - vma->vm_start) < map->value_size)
+		return -EINVAL;
+
+	WARN_ON_ONCE(!bpf_rcu_lock_held());
+	bpf_task_storage_lock();
+	data = __bpf_task_storage_get(map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE,
+				      0, true);
+	bpf_task_storage_unlock();
+	if (!data)
+		return -EINVAL;
+
+	return remap_vmalloc_range(vma, data, vma->vm_pgoff);
+}
+
 BTF_ID_LIST_GLOBAL_SINGLE(bpf_local_storage_map_btf_id, struct, bpf_local_storage_map)
 const struct bpf_map_ops task_storage_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -331,6 +353,7 @@ const struct bpf_map_ops task_storage_map_ops = {
 	.map_mem_usage = bpf_local_storage_map_mem_usage,
 	.map_btf_id = &bpf_local_storage_map_btf_id[0],
 	.map_owner_storage_ptr = task_storage_ptr,
+	.map_mmap = task_storage_map_mmap,
 };
 
 const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5e43ddd1b83f..d7c05a509870 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -404,7 +404,7 @@ static void bpf_map_release_memcg(struct bpf_map *map)
 		obj_cgroup_put(map->objcg);
 }
 
-static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
+struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map)
 {
 	if (map->objcg)
 		return get_mem_cgroup_from_objcg(map->objcg);
-- 
2.34.1