Re: [PATCH v2 bpf-next 2/2] selftests/bpf: add inline assembly helpers to access array elements

On Wed, Jan 03, 2024 at 01:53:59PM -0500, Barret Rhoden wrote:

SNIP

> +
> +
> +/* Test that attempting to load a bad program fails. */
> +#define test_bad(PROG) ({						\
> +	struct array_elem_test *skel;					\
> +	int err;							\
> +	skel = array_elem_test__open();					\
> +	if (!ASSERT_OK_PTR(skel, "array_elem_test open"))		\
> +		return;							\
> +	bpf_program__set_autoload(skel->progs.x_bad_ ## PROG, true); 	\
> +	err = array_elem_test__load(skel);				\
> +	ASSERT_ERR(err, "array_elem_test load " # PROG);		\
> +	array_elem_test__destroy(skel);					\
> +})

I wonder if we could use the existing RUN_TESTS macro and the tags
in the programs, like we do for example in progs/test_global_func1.c:

  SEC("tc")
  __failure __msg("combined stack size of 4 calls is 544")
  int global_func1(struct __sk_buff *skb)

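For the two x_bad_* programs in this patch, that could look roughly like
the sketch below (untested; the __msg() string is just a placeholder and
would need to match whatever the verifier actually reports, and the
functional subtests would stay as they are):

  /* progs/array_elem_test.c */
  SEC("?tp/syscalls/sys_enter_nanosleep")
  __failure __msg("...")  /* placeholder for the real verifier error */
  int x_bad_bss_array_access(void *ctx)
  {
          /* Fail to load: we don't prove our access is inside bss_elems[] */
          for (int i = 0; i < NR_MAP_ELEMS; i++)
                  bss_elems[lookup_indexes[i]] = i;
          return 0;
  }

  /* prog_tests/test_array_elem.c */
  void test_test_array_elem(void)
  {
          /* covers the __failure-tagged programs, no test_bad() needed */
          RUN_TESTS(array_elem_test);

          if (test__start_subtest("array_elem_access_all"))
                  test_access_all();
          if (test__start_subtest("array_elem_oob_access"))
                  test_oob_access();
          if (test__start_subtest("array_elem_access_array_map_infer_sz"))
                  test_access_array_map_infer_sz();
  }

That would drop the open/set_autoload/load boilerplate and let the test
loader check the expected failures.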
jirka


> +
> +void test_test_array_elem(void)
> +{
> +	if (test__start_subtest("array_elem_access_all"))
> +		test_access_all();
> +	if (test__start_subtest("array_elem_oob_access"))
> +		test_oob_access();
> +	if (test__start_subtest("array_elem_access_array_map_infer_sz"))
> +		test_access_array_map_infer_sz();
> +	if (test__start_subtest("array_elem_bad_map_array_access"))
> +		test_bad(map_array_access);
> +	if (test__start_subtest("array_elem_bad_bss_array_access"))
> +		test_bad(bss_array_access);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/array_elem_test.c b/tools/testing/selftests/bpf/progs/array_elem_test.c
> new file mode 100644
> index 000000000000..9d48afc933f0
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/array_elem_test.c
> @@ -0,0 +1,195 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2024 Google LLC. */
> +#include <stdbool.h>
> +#include <linux/types.h>
> +#include <linux/bpf.h>
> +#include <bpf/bpf_helpers.h>
> +#include <bpf/bpf_tracing.h>
> +#include "bpf_misc.h"
> +
> +char _license[] SEC("license") = "GPL";
> +
> +int target_pid = 0;
> +
> +#define NR_MAP_ELEMS 100
> +
> +/*
> + * We want to test valid accesses into an array, but we also need to fool the
> + * verifier.  If we just do for (i = 0; i < 100; i++), the verifier knows the
> + * value of i and can tell we're inside the array.
> + *
> + * This "lookup" array is just the values 0, 1, 2..., such that
> + * lookup_indexes[i] == i.  (set by userspace).  But the verifier doesn't know
> + * that.
> + */
> +unsigned int lookup_indexes[NR_MAP_ELEMS];
> +
> +/* Arrays can be in the BSS or inside a map element.  Make sure both work. */
> +int bss_elems[NR_MAP_ELEMS];
> +
> +struct map_array {
> +	int elems[NR_MAP_ELEMS];
> +};
> +
> +/*
> + * This is an ARRAY_MAP of a single struct, and that struct is an array of
> + * elements.  Userspace can mmap the map as if it was just a basic array of
> + * elements.  Though if you make an ARRAY_MAP where the *values* are ints, don't
> + * forget that bpf map elements are rounded up to 8 bytes.
> + *
> + * Once you get the pointer to the base of the inner array, you can access all
> + * of the elements without another bpf_map_lookup_elem(), which is useful if you
> + * are operating on multiple elements while holding a spinlock.
> + */
> +struct {
> +	__uint(type, BPF_MAP_TYPE_ARRAY);
> +	__uint(max_entries, 1);
> +	__type(key, int);
> +	__type(value, struct map_array);
> +	__uint(map_flags, BPF_F_MMAPABLE);
> +} arraymap SEC(".maps");
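(Side note for readers: the userspace side is not part of this hunk, but
the usual pattern for reading a BPF_F_MMAPABLE array map looks roughly
like the sketch below; it is illustrative only, names and error handling
are guesses:)

  struct map_array *arr;

  /* mmap() the single map element; the length is page-rounded by mmap() */
  arr = mmap(NULL, sizeof(struct map_array), PROT_READ | PROT_WRITE,
             MAP_SHARED, bpf_map__fd(skel->maps.arraymap), 0);
  if (!ASSERT_NEQ(arr, MAP_FAILED, "mmap arraymap"))
          return;
  for (int i = 0; i < NR_MAP_ELEMS; i++)
          ASSERT_EQ(arr->elems[i], i, "elem value");
  munmap(arr, sizeof(struct map_array));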
> +
> +static struct map_array *get_map_array(void)
> +{
> +	int zero = 0;
> +
> +	return bpf_map_lookup_elem(&arraymap, &zero);
> +}
> +
> +static int *get_map_elems(void)
> +{
> +	struct map_array *arr = get_map_array();
> +
> +	if (!arr)
> +		return NULL;
> +	return arr->elems;
> +}
> +
> +/*
> + * Test that we can access all elements, and that we are accessing the element
> + * we think we are accessing.
> + */
> +static void access_all(void)
> +{
> +	int *map_elems = get_map_elems();
> +	int *x;
> +
> +	for (int i = 0; i < NR_MAP_ELEMS; i++) {
> +		x = bpf_array_elem(map_elems, NR_MAP_ELEMS, lookup_indexes[i]);
> +		if (x)
> +			*x = i;
> +	}
> +
> +	for (int i = 0; i < NR_MAP_ELEMS; i++) {
> +		x = bpf_array_sz_elem(bss_elems, lookup_indexes[i]);
> +		if (x)
> +			*x = i;
> +	}
> +}
> +
> +SEC("?tp/syscalls/sys_enter_nanosleep")
> +int x_access_all(void *ctx)
> +{
> +	if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
> +		return 0;
> +	access_all();
> +	return 0;
> +}
> +
> +/*
> + * Helper for various OOB tests.  An out-of-bound access should be handled like
> + * a lookup failure.  Specifically, the verifier should ensure we do not access
> + * outside the array.  Userspace will check that we didn't access somewhere
> + * inside the array.
> + */
> +static void set_elem_to_1(long idx)
> +{
> +	int *map_elems = get_map_elems();
> +	int *x;
> +
> +	x = bpf_array_elem(map_elems, NR_MAP_ELEMS, idx);
> +	if (x)
> +		*x = 1;
> +	x = bpf_array_sz_elem(bss_elems, idx);
> +	if (x)
> +		*x = 1;
> +}
> +
> +/*
> + * Test various out-of-bounds accesses.
> + */
> +static void oob_access(void)
> +{
> +	set_elem_to_1(NR_MAP_ELEMS + 5);
> +	set_elem_to_1(NR_MAP_ELEMS);
> +	set_elem_to_1(-1);
> +	set_elem_to_1(~0UL);
> +}
> +
> +SEC("?tp/syscalls/sys_enter_nanosleep")
> +int x_oob_access(void *ctx)
> +{
> +	if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
> +		return 0;
> +	oob_access();
> +	return 0;
> +}
> +
> +/*
> + * Test that we can use the ARRAY_SIZE-style helper with an array in a map.
> + *
> + * Note that you cannot infer the size of the array from just a pointer; you
> + * have to use the actual elems[100].  i.e. this will fail and should fail to
> + * compile (-Wsizeof-pointer-div):
> + *
> + *	int *map_elems = get_map_elems();
> + *	x = bpf_array_sz_elem(map_elems, lookup_indexes[i]);
> + */
> +static void access_array_map_infer_sz(void)
> +{
> +	struct map_array *arr = get_map_array();
> +	int *x;
> +
> +	for (int i = 0; i < NR_MAP_ELEMS; i++) {
> +		x = bpf_array_sz_elem(arr->elems, lookup_indexes[i]);
> +		if (x)
> +			*x = i;
> +	}
> +}
> +
> +SEC("?tp/syscalls/sys_enter_nanosleep")
> +int x_access_array_map_infer_sz(void *ctx)
> +{
> +	if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
> +		return 0;
> +	access_array_map_infer_sz();
> +	return 0;
> +}
> +
> +
> +
> +SEC("?tp/syscalls/sys_enter_nanosleep")
> +int x_bad_map_array_access(void *ctx)
> +{
> +	int *map_elems = get_map_elems();
> +
> +	/*
> +	 * Need to check to promote map_elems from MAP_OR_NULL to MAP so that we
> +	 * fail to load below for the right reason.
> +	 */
> +	if (!map_elems)
> +		return 0;
> +	/* Fail to load: we don't prove our access is inside map_elems[] */
> +	for (int i = 0; i < NR_MAP_ELEMS; i++)
> +		map_elems[lookup_indexes[i]] = i;
> +	return 0;
> +}
> +
> +SEC("?tp/syscalls/sys_enter_nanosleep")
> +int x_bad_bss_array_access(void *ctx)
> +{
> +	/* Fail to load: we don't prove our access is inside bss_elems[] */
> +	for (int i = 0; i < NR_MAP_ELEMS; i++)
> +		bss_elems[lookup_indexes[i]] = i;
> +	return 0;
> +}
> diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
> index 2fd59970c43a..002bab44cde2 100644
> --- a/tools/testing/selftests/bpf/progs/bpf_misc.h
> +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
> @@ -135,4 +135,47 @@
>  /* make it look to compiler like value is read and written */
>  #define __sink(expr) asm volatile("" : "+g"(expr))
>  
> +/*
> + * Access an array element within a bound, such that the verifier knows the
> + * access is safe.
> + *
> + * This macro asm is the equivalent of:
> + *
> + *	if (!arr)
> + *		return NULL;
> + *	if (idx >= arr_sz)
> + *		return NULL;
> + *	return &arr[idx];
> + *
> + * The index (___idx below) needs to be a u64, at least for certain versions of
> + * the BPF ISA, since there aren't u32 conditional jumps.
> + */
> +#define bpf_array_elem(arr, arr_sz, idx) ({				\
> +	typeof(&(arr)[0]) ___arr = arr;					\
> +	__u64 ___idx = idx;						\
> +	if (___arr) {							\
> +		asm volatile("if %[__idx] >= %[__bound] goto 1f;	\
> +			      %[__idx] *= %[__size];		\
> +			      %[__arr] += %[__idx];		\
> +			      goto 2f;				\
> +			      1:;				\
> +			      %[__arr] = 0;			\
> +			      2:				\
> +			      "						\
> +			     : [__arr]"+r"(___arr), [__idx]"+r"(___idx)	\
> +			     : [__bound]"r"((arr_sz)),		        \
> +			       [__size]"i"(sizeof(typeof((arr)[0])))	\
> +			     : "cc");					\
> +	}								\
> +	___arr;								\
> +})
> +
> +/*
> + * Convenience wrapper for bpf_array_elem(), where we compute the size of the
> + * array.  Be sure to use an actual array, and not a pointer, just like with the
> + * ARRAY_SIZE macro.
> + */
> +#define bpf_array_sz_elem(arr, idx) \
> +	bpf_array_elem(arr, sizeof(arr) / sizeof((arr)[0]), idx)
> +
>  #endif
> -- 
> 2.43.0.472.g3155946c3a-goog
> 
> 



