On Thu, Feb 11, 2021 at 11:59:02AM -0800, Andrii Nakryiko wrote:

SNIP

> So in my previous example I assumed we have address ranges for ftrace
> section, which is exactly the opposite from what we have. So this
> binary search should be a bit different. start <= addr seems wrong
> here as well.
>
> The invariant here should be that addr[r] is the smallest address that
> is >= than function start addr, right? Except the corner case where
> there is no such r, but for that we have a final check in the return
> below. If you wanted to use index l, you'd need to change the
> invariant to find the largest addr, such that it is < end, but that
> seems a bit convoluted.
>
> So, with that, I think it should be like this:
>
> size_t l = 0, r = count - 1, m;
>
> /* make sure we don't use invalid r */
> if (count == 0) return false;
>
> while (l < r) {
>         /* note no +1 in this case, it's so that at the end, when you
>          * have, say, l = 0, and r = 1, you try l first, not r.
>          * Otherwise you might end up in the infinite loop when r never == l.
>          */
>         m = l + (r - l) / 2;
>         addr = addrs[m];
>
>         if (addr >= start)
>                 /* we satisfy invariant, so tighten r */
>                 r = m;
>         else
>                 /* m is not good enough as l, maybe m + 1 will be */
>                 l = m + 1;
> }
>
> return start <= addrs[r] && addrs[r] < end;
>
>
> So, basically, r is maintained as a valid index always, while we
> constantly try to tighten the l.
>
> Does this make sense?

another take ;-)

jirka


---
diff --git a/btf_encoder.c b/btf_encoder.c
index b124ec20a689..20a93ed60e52 100644
--- a/btf_encoder.c
+++ b/btf_encoder.c
@@ -36,6 +36,7 @@ struct funcs_layout {
 struct elf_function {
	const char *name;
	unsigned long addr;
+	unsigned long size;
	unsigned long sh_addr;
	bool generated;
 };
@@ -44,7 +45,7 @@ static struct elf_function *functions;
 static int functions_alloc;
 static int functions_cnt;
 
-static int functions_cmp(const void *_a, const void *_b)
+static int functions_cmp_name(const void *_a, const void *_b)
 {
	const struct elf_function *a = _a;
	const struct elf_function *b = _b;
@@ -52,6 +53,16 @@ static int functions_cmp(const void *_a, const void *_b)
	return strcmp(a->name, b->name);
 }
 
+static int functions_cmp_addr(const void *_a, const void *_b)
+{
+	const struct elf_function *a = _a;
+	const struct elf_function *b = _b;
+
+	if (a->addr == b->addr)
+		return 0;
+	return a->addr < b->addr ? -1 : 1;
+}
+
 static void delete_functions(void)
 {
	free(functions);
@@ -98,6 +109,7 @@ static int collect_function(struct btf_elf *btfe, GElf_Sym *sym,
 
	functions[functions_cnt].name = name;
	functions[functions_cnt].addr = elf_sym__value(sym);
+	functions[functions_cnt].size = elf_sym__size(sym);
	functions[functions_cnt].sh_addr = sh.sh_addr;
	functions[functions_cnt].generated = false;
	functions_cnt++;
@@ -236,6 +248,48 @@ get_kmod_addrs(struct btf_elf *btfe, __u64 **paddrs, __u64 *pcount)
	return 0;
 }
 
+static int is_ftrace_func(struct elf_function *func, __u64 *addrs,
+			  __u64 count, bool kmod)
+{
+	/*
+	 * For vmlinux image both addrs[x] and functions[x]::addr
+	 * values are final address and are comparable.
+	 *
+	 * For kernel module addrs[x] is final address, but
+	 * functions[x]::addr is relative address within section
+	 * and needs to be relocated by adding sh_addr.
+	 */
+	__u64 start = kmod ? func->addr + func->sh_addr : func->addr;
+	__u64 addr, end = func->addr + func->size;
+
+	/*
+	 * The invariant here is that addrs[r] is the smallest address
+	 * that is >= the function start addr. Except the corner case
+	 * where there is no such r, but for that we have a final check
+	 * in the return.
+	 */
+	size_t l = 0, r = count - 1, m;
+
+	/* make sure we don't use invalid r */
+	if (count == 0)
+		return false;
+
+	while (l < r) {
+		m = l + (r - l) / 2;
+		addr = addrs[m];
+
+		if (addr >= start) {
+			/* we satisfy invariant, so tighten r */
+			r = m;
+		} else {
+			/* m is not good enough as l, maybe m + 1 will be */
+			l = m + 1;
+		}
+	}
+
+	return start <= addrs[r] && addrs[r] < end;
+}
+
 static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
 {
	__u64 *addrs, count, i;
@@ -267,7 +321,7 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
	}
 
	qsort(addrs, count, sizeof(addrs[0]), addrs_cmp);
-	qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp);
+	qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp_addr);
 
	/*
	 * Let's got through all collected functions and filter
@@ -275,18 +329,9 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
	 */
	for (i = 0; i < functions_cnt; i++) {
		struct elf_function *func = &functions[i];
-		/*
-		 * For vmlinux image both addrs[x] and functions[x]::addr
-		 * values are final address and are comparable.
-		 *
-		 * For kernel module addrs[x] is final address, but
-		 * functions[x]::addr is relative address within section
-		 * and needs to be relocated by adding sh_addr.
-		 */
-		__u64 addr = kmod ? func->addr + func->sh_addr : func->addr;
 
		/* Make sure function is within ftrace addresses. */
-		if (bsearch(&addr, addrs, count, sizeof(addrs[0]), addrs_cmp)) {
+		if (is_ftrace_func(func, addrs, count, kmod)) {
			/*
			 * We iterate over sorted array, so we can easily skip
			 * not valid item and move following valid field into
@@ -303,6 +348,8 @@ static int setup_functions(struct btf_elf *btfe, struct funcs_layout *fl)
 
	if (btf_elf__verbose)
		printf("Found %d functions!\n", functions_cnt);
+
+	qsort(functions, functions_cnt, sizeof(functions[0]), functions_cmp_name);
	return 0;
 }
 
@@ -312,7 +359,7 @@ static struct elf_function *find_function(const struct btf_elf *btfe,
	struct elf_function key = { .name = name };
 
	return bsearch(&key, functions, functions_cnt, sizeof(functions[0]),
-		       functions_cmp);
+		       functions_cmp_name);
 }
 
 static bool btf_name_char_ok(char c, bool first)
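
In case it helps review, below is a minimal standalone sketch of the same
lower-bound search used in is_ftrace_func(), kept outside the encoder so the
corner cases are easy to poke at. The addr_in_range() name and the sample
addresses are made up for the test; only the l/r/m loop and the final range
check mirror the patch:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

/* Find the smallest addrs[r] >= start and report whether it
 * falls inside the function range [start, end).
 */
static bool addr_in_range(const u64 *addrs, size_t count, u64 start, u64 end)
{
	size_t l = 0, r = count - 1, m;

	if (count == 0)
		return false;

	while (l < r) {
		m = l + (r - l) / 2;
		if (addrs[m] >= start)
			r = m;		/* addrs[r] still satisfies the invariant */
		else
			l = m + 1;	/* addrs[m] is too small, move past it */
	}

	return start <= addrs[r] && addrs[r] < end;
}

int main(void)
{
	/* sorted ftrace-like addresses (made up) */
	u64 addrs[] = { 0x1000, 0x1040, 0x2000, 0x3000 };
	size_t count = sizeof(addrs) / sizeof(addrs[0]);

	/* function [0x1020, 0x1080) covers 0x1040 -> prints 1 */
	printf("%d\n", addr_in_range(addrs, count, 0x1020, 0x1080));
	/* function [0x2100, 0x2200) covers no entry -> prints 0 */
	printf("%d\n", addr_in_range(addrs, count, 0x2100, 0x2200));
	/* start past the last entry -> the final check rejects it, prints 0 */
	printf("%d\n", addr_in_range(addrs, count, 0x4000, 0x4100));

	return 0;
}

The midpoint without the +1 is what keeps the loop from getting stuck once l
and r are adjacent, which is the point made in the quoted text above.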