Re: [PATCH v2 bpf 03/11] bpftool: use a local bpf_perf_event_value to fix accessing its fields

On Thu, Apr 21, 2022 at 12:39:04AM +0000, Alexander Lobakin wrote:
> Fix the following error when building bpftool:
> 
>   CLANG   profiler.bpf.o
>   CLANG   pid_iter.bpf.o
> skeleton/profiler.bpf.c:18:21: error: invalid application of 'sizeof' to an incomplete type 'struct bpf_perf_event_value'
>         __uint(value_size, sizeof(struct bpf_perf_event_value));
>                            ^     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> tools/bpf/bpftool/bootstrap/libbpf/include/bpf/bpf_helpers.h:13:39: note: expanded from macro '__uint'
> tools/bpf/bpftool/bootstrap/libbpf/include/bpf/bpf_helper_defs.h:7:8: note: forward declaration of 'struct bpf_perf_event_value'
> struct bpf_perf_event_value;
>        ^
> 
> struct bpf_perf_event_value is used in the kernel only when
> CONFIG_BPF_EVENTS is enabled, so it has no BTF entry otherwise.
> Define struct bpf_perf_event_value___local with the
> `preserve_access_index` attribute inside the profiler BPF prog to
> allow compiling against any config. It is a full mirror of the UAPI
> structure, so it is compatible both with and without CO-RE.
> bpf_perf_event_read_value() expects a pointer to the original type,
> so a cast is needed.
> 

Hi Alexander,
What's the status of this series? I wasn't able to find a v3 on lore.

We received a report that OpenMandriva is still carrying this patch:
https://github.com/ClangBuiltLinux/linux/issues/1805

+ Tomasz

Tomasz, do you have more info on which particular configs reproduce
this issue? Is this patch still necessary?
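
Not part of the patch, but possibly useful for narrowing the configs
down: a quick userspace check of whether a given kernel's BTF carries
the type at all. This is only a sketch (assumes libbpf headers are
installed, e.g. build with `cc check_btf.c -lbpf`), and it inspects the
running kernel's /sys/kernel/btf/vmlinux, which may or may not be the
BTF that bpftool's vmlinux.h was generated from depending on whether
VMLINUX_BTF was set at build time:

	/* sketch: does this kernel's BTF define the struct that
	 * profiler.bpf.c needs? (absent when CONFIG_BPF_EVENTS=n)
	 */
	#include <stdio.h>
	#include <bpf/btf.h>

	int main(void)
	{
		struct btf *btf = btf__load_vmlinux_btf();
		int id;

		if (!btf) {
			perror("btf__load_vmlinux_btf");
			return 1;
		}

		id = btf__find_by_name(btf, "bpf_perf_event_value");
		printf("bpf_perf_event_value: %s in vmlinux BTF\n",
		       id > 0 ? "present" : "absent");

		btf__free(btf);
		return 0;
	}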

> Fixes: 47c09d6a9f67 ("bpftool: Introduce "prog profile" command")
> Suggested-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> Signed-off-by: Alexander Lobakin <alobakin@xxxxx>
> ---
>  tools/bpf/bpftool/skeleton/profiler.bpf.c | 27 ++++++++++++++---------
>  1 file changed, 17 insertions(+), 10 deletions(-)
> 
> diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
> index ce5b65e07ab1..2f80edc682f1 100644
> --- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
> +++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
> @@ -4,6 +4,12 @@
>  #include <bpf/bpf_helpers.h>
>  #include <bpf/bpf_tracing.h>
> 
> +struct bpf_perf_event_value___local {
> +	__u64 counter;
> +	__u64 enabled;
> +	__u64 running;
> +} __attribute__((preserve_access_index));
> +
>  /* map of perf event fds, num_cpu * num_metric entries */
>  struct {
>  	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
> @@ -15,14 +21,14 @@ struct {
>  struct {
>  	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
>  	__uint(key_size, sizeof(u32));
> -	__uint(value_size, sizeof(struct bpf_perf_event_value));
> +	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
>  } fentry_readings SEC(".maps");
> 
>  /* accumulated readings */
>  struct {
>  	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
>  	__uint(key_size, sizeof(u32));
> -	__uint(value_size, sizeof(struct bpf_perf_event_value));
> +	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
>  } accum_readings SEC(".maps");
> 
>  /* sample counts, one per cpu */
> @@ -39,7 +45,7 @@ const volatile __u32 num_metric = 1;
>  SEC("fentry/XXX")
>  int BPF_PROG(fentry_XXX)
>  {
> -	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
> +	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
>  	u32 key = bpf_get_smp_processor_id();
>  	u32 i;
> 
> @@ -53,10 +59,10 @@ int BPF_PROG(fentry_XXX)
>  	}
> 
>  	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
> -		struct bpf_perf_event_value reading;
> +		struct bpf_perf_event_value___local reading;
>  		int err;
> 
> -		err = bpf_perf_event_read_value(&events, key, &reading,
> +		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
>  						sizeof(reading));
>  		if (err)
>  			return 0;
> @@ -68,14 +74,14 @@ int BPF_PROG(fentry_XXX)
>  }
> 
>  static inline void
> -fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
> +fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
>  {
> -	struct bpf_perf_event_value *before, diff;
> +	struct bpf_perf_event_value___local *before, diff;
> 
>  	before = bpf_map_lookup_elem(&fentry_readings, &id);
>  	/* only account samples with a valid fentry_reading */
>  	if (before && before->counter) {
> -		struct bpf_perf_event_value *accum;
> +		struct bpf_perf_event_value___local *accum;
> 
>  		diff.counter = after->counter - before->counter;
>  		diff.enabled = after->enabled - before->enabled;
> @@ -93,7 +99,7 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
>  SEC("fexit/XXX")
>  int BPF_PROG(fexit_XXX)
>  {
> -	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
> +	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
>  	u32 cpu = bpf_get_smp_processor_id();
>  	u32 i, zero = 0;
>  	int err;
> @@ -102,7 +108,8 @@ int BPF_PROG(fexit_XXX)
>  	/* read all events before updating the maps, to reduce error */
>  	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
>  		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
> -						readings + i, sizeof(*readings));
> +						(void *)(readings + i),
> +						sizeof(*readings));
>  		if (err)
>  			return 0;
>  	}
> --
> 2.36.0
> 
> 
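
For anyone reaching this thread via the build error above: the fix is
the standard CO-RE "local mirror type" pattern. Roughly, and trimmed
well down from profiler.bpf.c (the attach point and map here are
illustrative only, not taken from the patch):

	// SPDX-License-Identifier: GPL-2.0
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* Local mirror of the UAPI layout; preserve_access_index makes
	 * CO-RE relocate field offsets against the kernel's own type at
	 * load time, and libbpf ignores the ___local suffix when
	 * matching type names.
	 */
	struct bpf_perf_event_value___local {
		__u64 counter;
		__u64 enabled;
		__u64 running;
	} __attribute__((preserve_access_index));

	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(u32));
		__uint(value_size, sizeof(int));
	} events SEC(".maps");

	SEC("fentry/bpf_fentry_test1") /* illustrative attach point */
	int BPF_PROG(read_counter)
	{
		struct bpf_perf_event_value___local val;
		u32 cpu = bpf_get_smp_processor_id();

		/* The helper's prototype still names the kernel struct,
		 * so pass the local mirror through a void * cast.
		 */
		if (bpf_perf_event_read_value(&events, cpu, (void *)&val,
					      sizeof(val)))
			return 0;

		return 0;
	}

	char LICENSE[] SEC("license") = "Dual BSD/GPL";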



