Re: [PATCH v1 bpf-next 9/9] selftests/bpf: Add refcounted_kptr tests

On 4/13/23 7:08 PM, Alexei Starovoitov wrote:
> On Mon, Apr 10, 2023 at 12:07:53PM -0700, Dave Marchevsky wrote:
>> Test refcounted local kptr functionality added in previous patches in
>> the series.
>>
>> Usecases which pass verification:
>>
>> * Add refcounted local kptr to both tree and list. Then read and -
>>   depending on test variant - possibly delete from tree, then list.
>>   * Also test doing the read-and-maybe-delete in the opposite order
>> * Stash a refcounted local kptr in a map_value, then add it to a
>>   rbtree. Read from both, possibly deleting after tree read.
>> * Add refcounted local kptr to both tree and list. Then, try reading and
>>   deleting twice from one of the collections.
>> * bpf_refcount_acquire of just-added non-owning ref should work, as
>>   should bpf_refcount_acquire of owning ref just out of bpf_obj_new
>>
>> Usecases which fail verification:
>>
>> * The simple successful bpf_refcount_acquire cases from above should
>>   both fail to verify if the newly-acquired owning ref is not dropped
>>
>> Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
>> ---
>>  .../bpf/prog_tests/refcounted_kptr.c          |  18 +
>>  .../selftests/bpf/progs/refcounted_kptr.c     | 410 ++++++++++++++++++
>>  .../bpf/progs/refcounted_kptr_fail.c          |  72 +++
>>  3 files changed, 500 insertions(+)
>>  create mode 100644 tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
>>  create mode 100644 tools/testing/selftests/bpf/progs/refcounted_kptr.c
>>  create mode 100644 tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c
>>
>> diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
>> new file mode 100644
>> index 000000000000..2ab23832062d
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
>> @@ -0,0 +1,18 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
>> +
>> +#include <test_progs.h>
>> +#include <network_helpers.h>
>> +
>> +#include "refcounted_kptr.skel.h"
>> +#include "refcounted_kptr_fail.skel.h"
>> +
>> +void test_refcounted_kptr(void)
>> +{
>> +	RUN_TESTS(refcounted_kptr);
>> +}
>> +
>> +void test_refcounted_kptr_fail(void)
>> +{
>> +	RUN_TESTS(refcounted_kptr_fail);
>> +}
>> diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
>> new file mode 100644
>> index 000000000000..b444e4cb07fb
>> --- /dev/null
>> +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
>> @@ -0,0 +1,410 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
>> +
>> +#include <vmlinux.h>
>> +#include <bpf/bpf_tracing.h>
>> +#include <bpf/bpf_helpers.h>
>> +#include <bpf/bpf_core_read.h>
>> +#include "bpf_misc.h"
>> +#include "bpf_experimental.h"
>> +
>> +struct node_data {
>> +	long key;
>> +	long list_data;
>> +	struct bpf_rb_node r;
>> +	struct bpf_list_node l;
>> +	struct bpf_refcount ref;
>> +};
>> +
>> +struct map_value {
>> +	struct node_data __kptr *node;
>> +};
>> +
>> +struct {
>> +	__uint(type, BPF_MAP_TYPE_ARRAY);
>> +	__type(key, int);
>> +	__type(value, struct map_value);
>> +	__uint(max_entries, 1);
>> +} stashed_nodes SEC(".maps");
>> +
>> +struct node_acquire {
>> +	long key;
>> +	long data;
>> +	struct bpf_rb_node node;
>> +	struct bpf_refcount refcount;
>> +};
>> +
>> +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
>> +private(A) struct bpf_spin_lock lock;
>> +private(A) struct bpf_rb_root root __contains(node_data, r);
>> +private(A) struct bpf_list_head head __contains(node_data, l);
>> +
>> +private(B) struct bpf_spin_lock alock;
>> +private(B) struct bpf_rb_root aroot __contains(node_acquire, node);
>> +
>> +static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b)
>> +{
>> +	struct node_data *a;
>> +	struct node_data *b;
>> +
>> +	a = container_of(node_a, struct node_data, r);
>> +	b = container_of(node_b, struct node_data, r);
>> +
>> +	return a->key < b->key;
>> +}
>> +
>> +static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b)
>> +{
>> +	struct node_acquire *node_a;
>> +	struct node_acquire *node_b;
>> +
>> +	node_a = container_of(a, struct node_acquire, node);
>> +	node_b = container_of(b, struct node_acquire, node);
>> +
>> +	return node_a->key < node_b->key;
>> +}
>> +
>> +static __always_inline
>> +long __insert_in_tree_and_list(struct bpf_list_head *head,
>> +			       struct bpf_rb_root *root,
>> +			       struct bpf_spin_lock *lock)
>> +{
>> +	struct node_data *n, *m;
>> +
>> +	n = bpf_obj_new(typeof(*n));
>> +	if (!n)
>> +		return -1;
>> +
>> +	m = bpf_refcount_acquire(n);
>> +	m->key = 123;
>> +	m->list_data = 456;
>> +
>> +	bpf_spin_lock(lock);
>> +	if (bpf_rbtree_add(root, &n->r, less)) {
>> +		/* Failure to insert - unexpected */
>> +		bpf_spin_unlock(lock);
>> +		bpf_obj_drop(m);
>> +		return -2;
>> +	}
>> +	bpf_spin_unlock(lock);
>> +
>> +	bpf_spin_lock(lock);
>> +	if (bpf_list_push_front(head, &m->l)) {
>> +		/* Failure to insert - unexpected */
>> +		bpf_spin_unlock(lock);
>> +		return -3;
>> +	}
>> +	bpf_spin_unlock(lock);
>> +	return 0;
>> +}
>> +
>> +static __always_inline
>> +long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root,
>> +			     struct bpf_spin_lock *lock)
>> +{
>> +	struct map_value *mapval;
>> +	struct node_data *n, *m;
>> +
>> +	mapval = bpf_map_lookup_elem(&stashed_nodes, &idx);
>> +	if (!mapval)
>> +		return -1;
>> +
>> +	n = bpf_obj_new(typeof(*n));
>> +	if (!n)
>> +		return -2;
>> +
>> +	n->key = val;
>> +	m = bpf_refcount_acquire(n);
>> +
>> +	n = bpf_kptr_xchg(&mapval->node, n);
>> +	if (n) {
>> +		bpf_obj_drop(n);
>> +		bpf_obj_drop(m);
>> +		return -3;
>> +	}
>> +
>> +	bpf_spin_lock(lock);
>> +	if (bpf_rbtree_add(root, &m->r, less)) {
>> +		/* Failure to insert - unexpected */
>> +		bpf_spin_unlock(lock);
>> +		return -4;
>> +	}
>> +	bpf_spin_unlock(lock);
>> +	return 0;
>> +}
>> +
>> +static __always_inline
>> +long __read_from_tree(struct bpf_rb_root *root, struct bpf_spin_lock *lock,
>> +		      bool remove_from_tree)
>> +{
>> +	struct bpf_rb_node *rb;
>> +	struct node_data *n;
>> +	long res = -99;
>> +
>> +	bpf_spin_lock(lock);
>> +
>> +	rb = bpf_rbtree_first(root);
>> +	if (!rb) {
>> +		bpf_spin_unlock(lock);
>> +		return -1;
>> +	}
>> +
>> +	n = container_of(rb, struct node_data, r);
>> +	res = n->key;
>> +
>> +	if (!remove_from_tree) {
>> +		bpf_spin_unlock(lock);
>> +		return res;
>> +	}
>> +
>> +	rb = bpf_rbtree_remove(root, rb);
>> +	bpf_spin_unlock(lock);
>> +	if (!rb)
>> +		return -2;
>> +	n = container_of(rb, struct node_data, r);
>> +	bpf_obj_drop(n);
>> +	return res;
>> +}
>> +
>> +static __always_inline
>> +long __read_from_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
>> +		      bool remove_from_list)
>> +{
>> +	struct bpf_list_node *l;
>> +	struct node_data *n;
>> +	long res = -99;
>> +
>> +	bpf_spin_lock(lock);
>> +
>> +	l = bpf_list_pop_front(head);
>> +	if (!l) {
>> +		bpf_spin_unlock(lock);
>> +		return -1;
>> +	}
>> +
>> +	n = container_of(l, struct node_data, l);
>> +	res = n->list_data;
>> +
>> +	if (!remove_from_list) {
>> +		if (bpf_list_push_back(head, &n->l)) {
>> +			bpf_spin_unlock(lock);
>> +			return -2;
>> +		}
>> +	}
>> +
>> +	bpf_spin_unlock(lock);
>> +
>> +	if (remove_from_list)
>> +		bpf_obj_drop(n);
>> +	return res;
>> +}
>> +
>> +static __always_inline
> 
> Why __always_inline in these 5 helpers?
> Will it pass the verifier if __always_inline is replaced with noinline?
> 

There's no good reason for the __always_inline; I cargo-culted it from the
linked_list test. Both replacing it with __attribute__((noinline)) and
dropping the attribute entirely result in passing tests, so I sent v2 with
the latter.
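
The v2 delta is just dropping the attribute from each of the five helpers,
roughly (showing the first one; body unchanged, continuation lines
re-indented to match):

-static __always_inline
-long __insert_in_tree_and_list(struct bpf_list_head *head,
+static long __insert_in_tree_and_list(struct bpf_list_head *head,

and similarly for the other four. IIUC the verifier is fine either way
since static BPF-to-BPF calls are verified per-callsite, so the argument
types stay precise without inlining.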

>> +long __read_from_unstash(int idx)


