Re: [PATCH v3 bpf-next 10/11] libbpf,bpf: share BTF relocate-related code with kernel

On Fri, 2024-05-10 at 11:30 +0100, Alan Maguire wrote:

[...]

> diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
> index 821063660d9f..82bd2a275a12 100644
> --- a/kernel/bpf/btf.c
> +++ b/kernel/bpf/btf.c
> @@ -274,6 +274,7 @@ struct btf {
>  	u32 start_str_off; /* first string offset (0 for base BTF) */
>  	char name[MODULE_NAME_LEN];
>  	bool kernel_btf;
> +	__u32 *base_map; /* map from distilled base BTF -> vmlinux BTF ids */
>  };
>  
>  enum verifier_phase {
> @@ -1735,7 +1736,13 @@ static void btf_free(struct btf *btf)
>  	kvfree(btf->types);
>  	kvfree(btf->resolved_sizes);
>  	kvfree(btf->resolved_ids);
> -	kvfree(btf->data);
> +	/* only split BTF allocates data, but btf->data is non-NULL for
> +	 * vmlinux BTF too.
> +	 */
> +	if (btf->base_btf)
> +		kvfree(btf->data);

Is this correct?
I see that btf->data is assigned in three functions:
- btf_parse(): allocated via kvmalloc(), does not set btf->base_btf;
- btf_parse_base(): data is not allocated but passed in from the caller
  (either vmlinux or module), does not set btf->base_btf;
- btf_parse_module(): allocated via kvmalloc(), does set btf->base_btf;

So, the check above seems incorrect for btf_parse(), am I wrong?
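
If so, maybe track data ownership explicitly instead of inferring it
from base_btf? Untested sketch; the 'data_is_owned' name is made up:

	/* in struct btf: set by btf_parse() and btf_parse_module() after
	 * kvmalloc()'ing btf->data; btf_parse_base() borrows the caller's
	 * buffer (vmlinux or module image) and leaves it false.
	 */
	bool data_is_owned;

and in btf_free():

	if (btf->data_is_owned)
		kvfree(btf->data);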

> +	if (btf->kernel_btf)
> +		kvfree(btf->base_map);

Nit: the check could be dropped; the btf->base_map field is
     conditionally set to an allocated object only by btf_parse_module(),
     and in all other cases the field remains NULL.
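     I.e., since kvfree() is a no-op for NULL pointers, this should be
     enough:

	kvfree(btf->base_map);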

>  	kfree(btf);
>  }
>  
> @@ -1764,6 +1771,90 @@ void btf_put(struct btf *btf)
>  	}
>  }
>  
> +struct btf *btf_base_btf(const struct btf *btf)
> +{
> +	return btf->base_btf;
> +}
> +
> +struct btf_rewrite_strs {
> +	struct btf *btf;
> +	const struct btf *old_base_btf;
> +	int str_start;
> +	int str_diff;
> +	__u32 *str_map;
> +};
> +
> +static __u32 btf_find_str(struct btf *btf, const char *s)
> +{
> +	__u32 offset = 0;
> +
> +	while (offset < btf->hdr.str_len) {
> +		while (!btf->strings[offset])
> +			offset++;
> +		if (strcmp(s, &btf->strings[offset]) == 0)
> +			return offset;
> +		while (btf->strings[offset])
> +			offset++;
> +	}
> +	return -ENOENT;
> +}
> +
> +static int btf_rewrite_strs(__u32 *str_off, void *ctx)
> +{
> +	struct btf_rewrite_strs *r = ctx;
> +	const char *s;
> +	int off;
> +
> +	if (!*str_off)
> +		return 0;
> +	if (*str_off >= r->str_start) {
> +		*str_off += r->str_diff;
> +	} else {
> +		s = btf_str_by_offset(r->old_base_btf, *str_off);
> +		if (!s)
> +			return -ENOENT;
> +		if (r->str_map[*str_off]) {
> +			off = r->str_map[*str_off];
> +		} else {
> +			off = btf_find_str(r->btf->base_btf, s);
> +			if (off < 0)
> +				return off;
> +			r->str_map[*str_off] = off;
> +		}

If the 'str_map' part were abstracted as a local function 'btf__add_str',
it should be possible to move btf_rewrite_strs() and btf_set_base_btf()
to btf_common.c, right?

Also, the linear scan over vmlinux BTF strings seems a bit inefficient;
maybe build a temporary hash table instead and define 'btf__find_str'?
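
E.g. an untested kernel-side sketch (needs linux/hashtable.h and
linux/jhash.h); the entry struct, function names and bucket count are
made up, and a global table is used only for brevity -- a real version
would allocate and free it inside btf_set_base_btf():

/* one-off index over base_btf's string section */
static DEFINE_HASHTABLE(btf_str_idx, 12);

struct btf_str_ent {
	struct hlist_node node;
	const char *s;
	__u32 off;
};

/* walk the NUL-separated string section once, hashing each string */
static int btf_build_str_idx(const struct btf *btf)
{
	__u32 off = 0;

	while (off < btf->hdr.str_len) {
		struct btf_str_ent *e = kmalloc(sizeof(*e), GFP_KERNEL);

		if (!e)
			return -ENOMEM;
		e->s = &btf->strings[off];
		e->off = off;
		hash_add(btf_str_idx, &e->node, jhash(e->s, strlen(e->s), 0));
		off += strlen(e->s) + 1;
	}
	return 0;
}

static __s32 btf_find_str_fast(const char *s)
{
	struct btf_str_ent *e;

	hash_for_each_possible(btf_str_idx, e, node, jhash(s, strlen(s), 0))
		if (!strcmp(e->s, s))
			return e->off;
	return -ENOENT;
}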

> +		*str_off = off;
> +	}
> +	return 0;
> +}
> +
> +int btf_set_base_btf(struct btf *btf, struct btf *base_btf)
> +{
> +	struct btf_rewrite_strs r = {};
> +	struct btf_type *t;
> +	int i, err;
> +
> +	r.old_base_btf = btf_base_btf(btf);
> +	if (!r.old_base_btf)
> +		return -EINVAL;
> +	r.btf = btf;
> +	r.str_start = r.old_base_btf->hdr.str_len;
> +	r.str_diff = base_btf->hdr.str_len - r.old_base_btf->hdr.str_len;
> +	r.str_map = kvcalloc(r.old_base_btf->hdr.str_len, sizeof(*r.str_map),
> +			     GFP_KERNEL | __GFP_NOWARN);
> +	if (!r.str_map)
> +		return -ENOMEM;
> +	btf->base_btf = base_btf;
> +	btf->start_id = btf_nr_types(base_btf);
> +	btf->start_str_off = base_btf->hdr.str_len;
> +	for (i = 0; i < btf->nr_types; i++) {
> +		t = (struct btf_type *)btf_type_by_id(btf, i + btf->start_id);
> +		err = btf_type_visit_str_offs((struct btf_type *)t, btf_rewrite_strs, &r);
> +		if (err)
> +			break;
> +	}
> +	kvfree(r.str_map);
> +	return err;
> +}
> +

[...]

> diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
> index 54949975398b..4a1fcb260f7f 100644
> --- a/tools/lib/bpf/btf_relocate.c
> +++ b/tools/lib/bpf/btf_relocate.c
> @@ -5,11 +5,43 @@
>  #define _GNU_SOURCE
>  #endif
>  
> +#ifdef __KERNEL__
> +#include <linux/bpf.h>
> +#include <linux/bsearch.h>
> +#include <linux/btf.h>
> +#include <linux/sort.h>
> +#include <linux/string.h>
> +#include <linux/bpf_verifier.h>
> +
> +#define btf_type_by_id				(struct btf_type *)btf_type_by_id
> +#define btf__type_cnt				btf_nr_types
> +#define btf__base_btf				btf_base_btf
> +#define btf__name_by_offset			btf_name_by_offset
> +#define btf_kflag				btf_type_kflag
> +
> +#define calloc(nmemb, sz)			kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN)
> +#define free(ptr)				kvfree(ptr)
> +#define qsort_r(base, num, sz, cmp, priv)	sort_r(base, num, sz, (cmp_r_func_t)cmp, NULL, priv)
> +
> +static inline __u8 btf_int_bits(const struct btf_type *t)
> +{
> +	return BTF_INT_BITS(*(__u32 *)(t + 1));
> +}
> +
> +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
> +{
> +	return (struct btf_decl_tag *)(t + 1);
> +}

Nit: maybe put btf_int_bits() and btf_decl_tag() in include/linux/btf.h?
     There are already a lot of similar definitions there.
     Same for btf_var_secinfos() from btf_common.c.
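
     Untested sketch of how these could look in include/linux/btf.h,
     next to the existing btf_*() accessors (exact placement is a guess):

static inline u8 btf_int_bits(const struct btf_type *t)
{
	return BTF_INT_BITS(*(u32 *)(t + 1));
}

static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
{
	return (struct btf_decl_tag *)(t + 1);
}

static inline struct btf_var_secinfo *btf_var_secinfos(const struct btf_type *t)
{
	return (struct btf_var_secinfo *)(t + 1);
}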

> +
> +#else
> +
>  #include "btf.h"
>  #include "bpf.h"
>  #include "libbpf.h"
>  #include "libbpf_internal.h"
>  
> +#endif /* __KERNEL__ */
> +
>  struct btf;
>  
>  struct btf_relocate {
