Re: [PATCH bpf-next] bpf: Pass map file to .map_update_batch directly

On 11/6/22 11:55 PM, Hou Tao wrote:
From: Hou Tao <houtao1@xxxxxxxxxx>

Currently generic_map_update_batch() will get the map file from
attr->batch.map_fd and pass it to bpf_map_update_value(). The problem is that
map_fd may have been closed, or reopened as a different file type, and
generic_map_update_batch() doesn't check the validity of map_fd.

This doesn't cause any problem for now, because only
BPF_MAP_TYPE_PERF_EVENT_ARRAY uses the passed map file, and it doesn't support
the batch update operation. But it is better to fix the potential use of an
invalid map file.
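
(As an illustration only, not part of the patch: once a map fd is closed, the
same fd number can immediately be handed out for a completely different file.
A minimal userspace sketch, using the libbpf helper bpf_map_create() and
/dev/null as an arbitrary stand-in for "a different file type":)

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <bpf/bpf.h>

	int main(void)
	{
		int map_fd, reused_fd;

		/* Create an arbitrary array map just to obtain a map fd. */
		map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
					sizeof(int), 1, NULL);
		if (map_fd < 0)
			return 1;

		/* Close the map fd, then open another file: open() returns the
		 * lowest free fd number, so it will typically (though not
		 * necessarily) reuse the number the map just gave up.
		 */
		close(map_fd);
		reused_fd = open("/dev/null", O_RDONLY);

		printf("map fd was %d, /dev/null is now fd %d\n", map_fd, reused_fd);
		close(reused_fd);
		return 0;
	}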

I think we don't have a problem here. The reason is that in bpf_map_do_batch()
we have
	f = fdget(ufd);
	...
	BPF_DO_BATCH(map->ops->map_update_batch);
	fdput(f)

So the original ufd is still valid during map->ops->map_update_batch(), which
eventually may call generic_map_update_batch(), which tries to do fdget(ufd)
again.

Did I miss anything here?


Checking the validity of the map file returned from fdget() in
generic_map_update_batch() cannot fix the problem, because the returned map
file may be different from the map file obtained in bpf_map_do_batch() due to
the reopening of the fd, so just pass the map file directly to
.map_update_batch() in bpf_map_do_batch().
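
Condensed (paraphrased, not a literal copy of kernel/bpf/syscall.c), the
pre-patch chain resolves the same ufd twice:

	/* bpf_map_do_batch() */
	f = fdget(ufd);			/* pins one struct file for ufd */
	map = __bpf_map_get(f);
	...
	BPF_DO_BATCH(map->ops->map_update_batch);
		/* -> generic_map_update_batch() */
		f = fdget(ufd);		/* resolves ufd a second time; if the fd
					 * number was closed and reused meanwhile,
					 * this can be a different struct file */
		...
		err = bpf_map_update_value(map, f, key, value, ...);
		fdput(f);
	fdput(f);

With this patch, only the fdget() in bpf_map_do_batch() remains, and its f.file
is passed down, so .map_update_batch() never re-resolves the fd number.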

Signed-off-by: Hou Tao <houtao1@xxxxxxxxxx>
---
  include/linux/bpf.h  |  5 +++--
  kernel/bpf/syscall.c | 31 ++++++++++++++++++-------------
  2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 798aec816970..20cfe88ee6df 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -85,7 +85,8 @@ struct bpf_map_ops {
  	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
  					   const union bpf_attr *attr,
  					   union bpf_attr __user *uattr);
-	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+				const union bpf_attr *attr,
  				union bpf_attr __user *uattr);
  	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
  				union bpf_attr __user *uattr);
@@ -1776,7 +1777,7 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
  int  generic_map_lookup_batch(struct bpf_map *map,
  			      const union bpf_attr *attr,
  			      union bpf_attr __user *uattr);
-int  generic_map_update_batch(struct bpf_map *map,
+int  generic_map_update_batch(struct bpf_map *map, struct file *map_file,
  			      const union bpf_attr *attr,
  			      union bpf_attr __user *uattr);
  int  generic_map_delete_batch(struct bpf_map *map,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 85532d301124..cb8a87277bf8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -175,8 +175,8 @@ static void maybe_wait_bpf_programs(struct bpf_map *map)
  		synchronize_rcu();
  }
-static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
-				void *value, __u64 flags)
+static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
+				void *key, void *value, __u64 flags)
  {
  	int err;
@@ -190,7 +190,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
  		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
  		return sock_map_update_elem_sys(map, key, value, flags);
  	} else if (IS_FD_PROG_ARRAY(map)) {
-		return bpf_fd_array_map_update_elem(map, f.file, key, value,
+		return bpf_fd_array_map_update_elem(map, map_file, key, value,
  						    flags);
  	}
@@ -205,12 +205,12 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
  						       flags);
  	} else if (IS_FD_ARRAY(map)) {
  		rcu_read_lock();
-		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
+		err = bpf_fd_array_map_update_elem(map, map_file, key, value,
  						   flags);
  		rcu_read_unlock();
  	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
  		rcu_read_lock();
-		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
+		err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
  						  flags);
  		rcu_read_unlock();
  	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
@@ -1390,7 +1390,7 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
  		goto free_key;
  	}
-	err = bpf_map_update_value(map, f, key, value, attr->flags);
+	err = bpf_map_update_value(map, f.file, key, value, attr->flags);
  	kvfree(value);
  free_key:
@@ -1576,16 +1576,14 @@ int generic_map_delete_batch(struct bpf_map *map,
  	return err;
  }
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
  			     const union bpf_attr *attr,
  			     union bpf_attr __user *uattr)
  {
  	void __user *values = u64_to_user_ptr(attr->batch.values);
  	void __user *keys = u64_to_user_ptr(attr->batch.keys);
  	u32 value_size, cp, max_count;
-	int ufd = attr->batch.map_fd;
  	void *key, *value;
-	struct fd f;
  	int err = 0;
  	if (attr->batch.elem_flags & ~BPF_F_LOCK)
@@ -1612,7 +1610,6 @@ int generic_map_update_batch(struct bpf_map *map,
  		return -ENOMEM;
  	}
-	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
  	for (cp = 0; cp < max_count; cp++) {
  		err = -EFAULT;
  		if (copy_from_user(key, keys + cp * map->key_size,
@@ -1620,7 +1617,7 @@ int generic_map_update_batch(struct bpf_map *map,
  		    copy_from_user(value, values + cp * value_size, value_size))
  			break;
-		err = bpf_map_update_value(map, f, key, value,
+		err = bpf_map_update_value(map, map_file, key, value,
  					   attr->batch.elem_flags);
  		if (err)
@@ -1633,7 +1630,6 @@ int generic_map_update_batch(struct bpf_map *map,
  	kvfree(value);
  	kvfree(key);
-	fdput(f);
  	return err;
  }
@@ -4435,6 +4431,15 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
  		err = fn(map, attr, uattr);	\
  	} while (0)
+#define BPF_DO_BATCH_WITH_FILE(fn) \
+	do {						\
+		if (!fn) {				\
+			err = -ENOTSUPP;		\
+			goto err_put;			\
+		}					\
+		err = fn(map, f.file, attr, uattr);	\
+	} while (0)
+
  static int bpf_map_do_batch(const union bpf_attr *attr,
  			    union bpf_attr __user *uattr,
  			    int cmd)
@@ -4470,7 +4475,7 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
  	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
  		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
  	else if (cmd == BPF_MAP_UPDATE_BATCH)
-		BPF_DO_BATCH(map->ops->map_update_batch);
+		BPF_DO_BATCH_WITH_FILE(map->ops->map_update_batch);
  	else
  		BPF_DO_BATCH(map->ops->map_delete_batch);
  err_put:


