Re: [PATCH bpf-next 3/5] bpf: add fd_array_cnt attribute for prog_load

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Fri, 2024-11-15 at 00:46 +0000, Anton Protopopov wrote:

[...]

> @@ -22537,6 +22543,76 @@ struct btf *bpf_get_btf_vmlinux(void)
>  	return btf_vmlinux;
>  }
>  
> +/*
> + * The add_fd_from_fd_array() is executed only if fd_array_cnt is given.  In
> + * this case expect that every file descriptor in the array is either a map or
> + * a BTF, or a hole (0). Everything else is considered to be trash.
> + */
> +static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
> +{
> +	struct bpf_map *map;
> +	CLASS(fd, f)(fd);
> +	int ret;
> +
> +	map = __bpf_map_get(f);
> +	if (IS_ERR(map)) {
> +		if (!IS_ERR(__btf_get_by_fd(f)))
> +			return 0;
> +
> +		/* allow holes */
> +		if (!fd)
> +			return 0;
> +
> +		verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
> +		return PTR_ERR(map);
> +	}
> +
> +	ret = add_used_map(env, map);
> +	if (ret < 0)
> +		return ret;
> +	return 0;
> +}

Nit: keeping this function "flat" would allow easier extension, if necessary.
     E.g.:

    static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd)
    {
    	struct bpf_map *map;
    	CLASS(fd, f)(fd);
    	int ret;

    	/* allow holes */
    	if (!fd)
    		return 0;
    	map = __bpf_map_get(f);
    	if (!IS_ERR(map)) {
    		ret = add_used_map(env, map);
    		return ret < 0 ? ret : 0;
    	}
    	if (!IS_ERR(__btf_get_by_fd(f)))
    		return 0;
    	verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd);
    	return -EINVAL;
    }


> +
> +static int env_init_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr)
> +{
> +	int size = sizeof(int) * attr->fd_array_cnt;
> +	int *copy;
> +	int ret;
> +	int i;
> +
> +	if (attr->fd_array_cnt >= MAX_USED_MAPS)
> +		return -E2BIG;
> +
> +	env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
> +
> +	/*
> +	 * The only difference between old (no fd_array_cnt is given) and new
> +	 * APIs is that in the latter case the fd_array is expected to be
> + * contiguous and is scanned for map fds right away
> +	 */
> +	if (!size)
> +		return 0;
> +
> +	copy = kzalloc(size, GFP_KERNEL);
> +	if (!copy)
> +		return -ENOMEM;
> +
> +	if (copy_from_bpfptr_offset(copy, env->fd_array, 0, size)) {
> +		ret = -EFAULT;
> +		goto free_copy;
> +	}
> +
> +	for (i = 0; i < attr->fd_array_cnt; i++) {
> +		ret = add_fd_from_fd_array(env, copy[i]);
> +		if (ret)
> +			goto free_copy;
> +	}
> +
> +free_copy:
> +	kfree(copy);
> +	return ret;
> +}
> +
>  int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
>  {
>  	u64 start_time = ktime_get_ns();

[...]






[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux