On Wed, Feb 10, 2021 at 5:26 AM Jiri Olsa <jolsa@xxxxxxxxxx> wrote:
>
> On Tue, Feb 09, 2021 at 02:00:29PM -0800, Andrii Nakryiko wrote:
>
> SNIP
>
> > > > > I'm still trying to build the kernel.. however ;-)
> > > > >
> > > > > patch below adds the ftrace check only for static functions
> > > > > and lets the external go through.. but as you said, in this
> > > > > case we'll need to figure out the 'notrace' and other checks
> > > > > ftrace is doing
> > > > >
> > > > > jirka
> > > > >
> > > > >
> > > > > ---
> > > > > diff --git a/btf_encoder.c b/btf_encoder.c
> > > > > index b124ec20a689..4d147406cfa5 100644
> > > > > --- a/btf_encoder.c
> > > > > +++ b/btf_encoder.c
> > > > > @@ -734,7 +734,7 @@ int cu__encode_btf(struct cu *cu, int verbose, bool force,
> > > > >                         continue;
> > > > >                 if (!has_arg_names(cu, &fn->proto))
> > > > >                         continue;
> > > > > -               if (functions_cnt) {
> > > > > +               if (!fn->external && functions_cnt) {
> > > > I wouldn't trust DWARF, honestly. Wouldn't checking GLOBAL vs LOCAL
> > > > FUNC ELF symbol be more reliable?
> > > that'd mean extra bsearch on each processed function,
> > > on the other hand, we're already slow ;-) I'll check
> > > how big the slowdown would be
> > >
> > We currently record addresses and do binary search. Now we need to
> > record address + size and still do a binary search with slightly
> > different semantics (find the closest entry >= addr). Then just check
> > that it overlaps, taking into account the length of the function code.
> > It shouldn't result in a noticeable slowdown. It might actually be
> > faster, because we might avoid callback function call costs.
>
> I'm still not sure how to handle the external check for functions via elf,

I might be missing something, but don't all functions have
corresponding ELF symbols? And then a symbol has either LOCAL or
GLOBAL binding. LOCALs are supposed to be not visible outside their
respective CUs (so they correspond to static functions), while GLOBALs
are extern-able funcs. So if a func's symbol is GLOBAL, it should be
ok to assume it's attachable (not inlined, at least).
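To make that concrete, here is a minimal stand-alone libelf sketch of the
binding check (illustration only, not part of either patch; the file name
and output format are made up, error handling is simplified):

/* List defined FUNC symbols and their binding with libelf.
 * Build with: cc -o funcsyms funcsyms.c -lelf   (file name is hypothetical)
 */
#include <err.h>
#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2)
		errx(1, "usage: %s <elf-file>", argv[0]);
	if (elf_version(EV_CURRENT) == EV_NONE)
		errx(1, "libelf too old");

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		err(1, "open %s", argv[1]);

	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf)
		errx(1, "elf_begin: %s", elf_errmsg(-1));

	Elf_Scn *scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr shdr;

		if (!gelf_getshdr(scn, &shdr) || shdr.sh_type != SHT_SYMTAB)
			continue;

		Elf_Data *data = elf_getdata(scn, NULL);
		size_t nsyms = shdr.sh_entsize ? shdr.sh_size / shdr.sh_entsize : 0;

		for (size_t i = 0; i < nsyms; i++) {
			GElf_Sym sym;

			if (!gelf_getsym(data, i, &sym))
				continue;
			/* only defined function symbols are interesting */
			if (GELF_ST_TYPE(sym.st_info) != STT_FUNC ||
			    sym.st_shndx == SHN_UNDEF)
				continue;

			/* LOCAL ~ static function, GLOBAL/WEAK ~ extern-able */
			const char *name = elf_strptr(elf, shdr.sh_link, sym.st_name);

			printf("%-6s %s\n",
			       GELF_ST_BIND(sym.st_info) == STB_LOCAL ? "LOCAL" : "GLOBAL",
			       name ? name : "?");
		}
	}

	elf_end(elf);
	close(fd);
	return 0;
}

In btf_encoder.c itself no extra ELF walking should be needed:
collect_function() already receives the GElf_Sym, so the same information
should be available there via GELF_ST_BIND(sym->st_info).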
> but below is a change for checking that ftrace addrs are within elf functions
>
> seems to work in my tests, I'll run some more tests and send a full patch

It seems unnecessarily convoluted. I was thinking about something like
this (the diff will totally be screwed up by gmail, and I haven't even
compiled it):

diff --git a/btf_encoder.c b/btf_encoder.c
index b124ec20a689..8162b238bd43 100644
--- a/btf_encoder.c
+++ b/btf_encoder.c
@@ -236,6 +236,23 @@ get_kmod_addrs(struct btf_elf *btfe, __u64 **paddrs, __u64 *pcount)
        return 0;
 }

+struct func_seg { __u64 start; __u64 end; };
+
+static int func_exists(struct func_seg *segs, size_t len, __u64 addr)
+{
+       size_t l = 0, r = len - 1, m;
+
+       while (l < r) {
+               m = l + (r - l + 1) / 2;
+               if (segs[m].start <= addr)
+                       l = m;
+               else
+                       r = m - 1;
+       }
+
+       return segs[l].start <= addr && addr < segs[l].end;
+}
+
 static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
 {
        __u64 *addrs, count, i;
@@ -286,7 +303,7 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
                __u64 addr = kmod ? func->addr + func->sh_addr : func->addr;

                /* Make sure function is within ftrace addresses. */
-               if (bsearch(&addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
+               if (func_exists(addrs, count, addr))
                        /*
                         * We iterate over sorted array, so we can easily skip
                         * not valid item and move following valid field into

So the idea is to use address segments and check whether there is a
segment that overlaps with a given address, by first binary searching
for the segment with the largest starting address that is <= addr, and
then just confirming that this segment does overlap with the requested
address.

WDYT?
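For what it's worth, the lookup itself compiles and behaves as expected in
a stand-alone sketch (the segment values in main() are made up, and per the
"record address + size" point above, addrs would also have to be collected
as start/end pairs for the func_exists() call in the diff to type-check):

/* Stand-alone sketch of the segment lookup from the diff above. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned long long u64;

struct func_seg { u64 start; u64 end; };

/* segs must be sorted by start and non-empty */
static int func_exists(const struct func_seg *segs, size_t len, u64 addr)
{
	size_t l = 0, r = len - 1, m;

	/* find the last segment whose start is <= addr */
	while (l < r) {
		m = l + (r - l + 1) / 2;
		if (segs[m].start <= addr)
			l = m;
		else
			r = m - 1;
	}

	return segs[l].start <= addr && addr < segs[l].end;
}

int main(void)
{
	struct func_seg segs[] = {
		{ 0x1000, 0x1040 },
		{ 0x1040, 0x10a0 },
		{ 0x2000, 0x2010 },
	};
	size_t len = sizeof(segs) / sizeof(segs[0]);

	printf("%d %d %d %d\n",
	       func_exists(segs, len, 0x1000),   /* 1: first byte of first func */
	       func_exists(segs, len, 0x109f),   /* 1: inside second func */
	       func_exists(segs, len, 0x10a0),   /* 0: gap between funcs */
	       func_exists(segs, len, 0x0fff));  /* 0: before first func */
	return 0;
}

The loop converges on the last segment whose start is <= addr, which is the
only segment that can possibly contain it, so a single range check at the
end is enough.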
>
> jirka
>
>
> ---
> diff --git a/btf_encoder.c b/btf_encoder.c
> index b124ec20a689..548a12847f99 100644
> --- a/btf_encoder.c
> +++ b/btf_encoder.c
> @@ -36,6 +36,7 @@ struct funcs_layout {
>  struct elf_function {
>         const char *name;
>         unsigned long addr;
> +       unsigned long end;
>         unsigned long sh_addr;
>         bool generated;
>  };
> @@ -44,7 +45,7 @@ static struct elf_function *functions;
>  static int functions_alloc;
>  static int functions_cnt;
>
> -static int functions_cmp(const void *_a, const void *_b)
> +static int functions_cmp_name(const void *_a, const void *_b)
>  {
>         const struct elf_function *a = _a;
>         const struct elf_function *b = _b;
> @@ -52,6 +53,16 @@
>         return strcmp(a->name, b->name);
>  }
>
> +static int functions_cmp_addr(const void *_a, const void *_b)
> +{
> +       const struct elf_function *a = _a;
> +       const struct elf_function *b = _b;
> +
> +       if (a->addr == b->addr)
> +               return 0;
> +       return a->addr < b->addr ? -1 : 1;
> +}
> +
>  static void delete_functions(void)
>  {
>         free(functions);
> @@ -98,6 +109,7 @@ static int collect_function(struct btf_elf *btfe, GElf_Sym *sym,
>
>         functions[functions_cnt].name = name;
>         functions[functions_cnt].addr = elf_sym__value(sym);
> +       functions[functions_cnt].end = (__u64) -1;
>         functions[functions_cnt].sh_addr = sh.sh_addr;
>         functions[functions_cnt].generated = false;
>         functions_cnt++;
> @@ -236,9 +248,25 @@ get_kmod_addrs(struct btf_elf *btfe, __u64 **paddrs, __u64 *pcount)
>         return 0;
>  }
>
> +static bool is_addr_in_func(__u64 addr, struct elf_function *func, bool kmod)
> +{
> +       /*
> +        * For vmlinux image both addrs[x] and functions[x]::addr
> +        * values are final address and are comparable.
> +        *
> +        * For kernel module addrs[x] is final address, but
> +        * functions[x]::addr is relative address within section
> +        * and needs to be relocated by adding sh_addr.
> +        */
> +       __u64 start = kmod ? func->addr + func->sh_addr : func->addr;
> +       __u64 end = kmod ? func->end + func->sh_addr : func->end;
> +
> +       return start <= addr && addr < end;
> +}
> +
>  static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
>  {
> -       __u64 *addrs, count, i;
> +       __u64 *addrs, count, i_func, i_addr;
>         int functions_valid = 0;
>         bool kmod = false;
>
> @@ -266,43 +294,62 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
>                 return 0;
>         }
>
> -       qsort(addrs, count, sizeof(addrs[0]), addrs_cmp);
> -       qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp);
> -
>         /*
> -        * Let's got through all collected functions and filter
> -        * out those that are not in ftrace.
> +        * Sort both functions and addrs so we can iterate
> +        * both of them simultaneously and found matching
> +        * func/addr pairs.
>          */
> -       for (i = 0; i < functions_cnt; i++) {
> -               struct elf_function *func = &functions[i];
> -               /*
> -                * For vmlinux image both addrs[x] and functions[x]::addr
> -                * values are final address and are comparable.
> -                *
> -                * For kernel module addrs[x] is final address, but
> -                * functions[x]::addr is relative address within section
> -                * and needs to be relocated by adding sh_addr.
> -                */
> -               __u64 addr = kmod ? func->addr + func->sh_addr : func->addr;
> +       qsort(addrs, count, sizeof(addrs[0]), addrs_cmp);
> +       qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp_addr);
> +
> +       for (i_func = 0, i_addr = 0; i_func < functions_cnt; i_func++) {
> +               struct elf_function *func = &functions[i_func];
> +
> +               if (i_func + 1 < functions_cnt)
> +                       func->end = functions[i_func + 1].addr;
> +
> +               for (; i_addr < count; i_addr++) {
> +                       __u64 addr = addrs[i_addr];
> +
> +                       /* Functions are ahead, catch up with addrs. */
> +                       if (addr < func->addr)
> +                               continue;
> +
> +                       /* Addr is within function - mark function as valid. */
> +                       if (is_addr_in_func(addr, func, kmod)) {
> +                               /*
> +                                * We iterate over sorted array, so we can easily skip
> +                                * not valid item and move following valid field into
> +                                * its place, and still keep the 'new' array sorted.
> +                                */
> +                               if (i_func != functions_valid)
> +                                       functions[functions_valid] = functions[i_func];
> +                               functions_valid++;
> +                               i_addr++;
> +                       }
>
> -               /* Make sure function is within ftrace addresses. */
> -               if (bsearch(&addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
>                         /*
> -                        * We iterate over sorted array, so we can easily skip
> -                        * not valid item and move following valid field into
> -                        * its place, and still keep the 'new' array sorted.
> +                        * Addrs are ahead, catch up with functions, or we just
> +                        * found valid function and want to move to another.
>                          */
> -                       if (i != functions_valid)
> -                               functions[functions_valid] = functions[i];
> -                       functions_valid++;
> +                       break;
>                 }
>         }
>
> +       if (btf_elf__verbose) {
> +               printf("Found %d functions out of %d symbols and %llu ftrace addresses.\n",
> +                      functions_valid, functions_cnt, count);
> +       }
> +
>         functions_cnt = functions_valid;
>         free(addrs);
>
> -       if (btf_elf__verbose)
> -               printf("Found %d functions!\n", functions_cnt);
> +       /*
> +        * And finaly sort 'valid' functions by name,
> +        * so find_function can be used.
> +        */
> +       qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp_name);
> +
>         return 0;
>  }
>
> @@ -312,7 +359,7 @@ static struct elf_function *find_function(const struct btf_elf *btfe,
>         struct elf_function key = { .name = name };
>
>         return bsearch(&key, functions, functions_cnt, sizeof(functions[0]),
> -                      functions_cmp);
> +                      functions_cmp_name);
>  }
>
>  static bool btf_name_char_ok(char c, bool first)
>
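And, for comparison, a stripped-down stand-alone sketch of the merge-filter
approach from the quoted patch (sample names and addresses are made up, and
it models only the vmlinux case, ignoring the kmod sh_addr relocation):

/* Keep only functions that contain at least one ftrace address:
 * sort both arrays by address, derive each function's end from the
 * next function's start, and filter in a single merge pass.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;

struct func { const char *name; u64 start; u64 end; };

static int func_cmp_addr(const void *_a, const void *_b)
{
	const struct func *a = _a, *b = _b;

	if (a->start == b->start)
		return 0;
	return a->start < b->start ? -1 : 1;
}

static int addr_cmp(const void *_a, const void *_b)
{
	const u64 *a = _a, *b = _b;

	if (*a == *b)
		return 0;
	return *a < *b ? -1 : 1;
}

/* Compact funcs in place; return how many were kept. */
static size_t filter_funcs(struct func *funcs, size_t nfuncs,
			   u64 *addrs, size_t naddrs)
{
	size_t valid = 0, i_addr = 0, i;

	qsort(funcs, nfuncs, sizeof(funcs[0]), func_cmp_addr);
	qsort(addrs, naddrs, sizeof(addrs[0]), addr_cmp);

	for (i = 0; i < nfuncs; i++) {
		/* derive end from the next function's start */
		if (i + 1 < nfuncs)
			funcs[i].end = funcs[i + 1].start;

		for (; i_addr < naddrs; i_addr++) {
			if (addrs[i_addr] < funcs[i].start)
				continue;	/* addr below this func, skip it */
			if (addrs[i_addr] < funcs[i].end) {
				funcs[valid++] = funcs[i];
				i_addr++;
			}
			break;			/* move on to the next function */
		}
	}
	return valid;
}

int main(void)
{
	struct func funcs[] = {
		{ "traced_a",  0x1000, (u64)-1 },
		{ "notrace_b", 0x1040, (u64)-1 },
		{ "traced_c",  0x2000, (u64)-1 },
	};
	u64 addrs[] = { 0x2000, 0x1000 };	/* ftrace addrs, unsorted */
	size_t n = filter_funcs(funcs, 3, addrs, 2);

	for (size_t i = 0; i < n; i++)
		printf("%s\n", funcs[i].name);	/* prints traced_a, traced_c */
	return 0;
}

Both arrays are sorted once and then walked in a single pass, so each
ftrace address is examined only once; the cost the patch adds on top of
that is the extra sort by address plus the final re-sort by name so that
find_function() keeps working.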