On Tue, Jan 25, 2022 at 09:11:57PM +0900, Masami Hiramatsu wrote:

SNIP

> +
> +/* Convert ftrace location address from symbols */
> +static int convert_func_addresses(struct fprobe *fp)
> +{
> +	unsigned long addr, size;
> +	unsigned int i;
> +
> +	/* Convert symbols to symbol address */
> +	if (fp->syms) {
> +		fp->addrs = kcalloc(fp->nentry, sizeof(*fp->addrs), GFP_KERNEL);
> +		if (!fp->addrs)
> +			return -ENOMEM;
> +
> +		for (i = 0; i < fp->nentry; i++) {
> +			fp->addrs[i] = kallsyms_lookup_name(fp->syms[i]);
> +			if (!fp->addrs[i])	/* Maybe wrong symbol */
> +				goto error;
> +		}
> +	}
> +
> +	/* Convert symbol address to ftrace location. */
> +	for (i = 0; i < fp->nentry; i++) {
> +		if (!kallsyms_lookup_size_offset(fp->addrs[i], &size, NULL))
> +			size = MCOUNT_INSN_SIZE;
> +		addr = ftrace_location_range(fp->addrs[i], fp->addrs[i] + size);

you need to subtract 1 from 'end' here, as explained in the
__within_notrace_func comment:

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */

like in the patch below

also this conversion is for archs where the address from kallsyms does not
match the real attach address, like arm, which you mentioned earlier, right?

could we have that arch specific, so we don't have the extra heavy search
loop for archs that do not need it?

thanks,
jirka


---
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 4d089dda89c2..7970418820e7 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -91,7 +91,7 @@ static int convert_func_addresses(struct fprobe *fp)
 	for (i = 0; i < fp->nentry; i++) {
 		if (!kallsyms_lookup_size_offset(fp->addrs[i], &size, NULL))
 			size = MCOUNT_INSN_SIZE;
-		addr = ftrace_location_range(fp->addrs[i], fp->addrs[i] + size);
+		addr = ftrace_location_range(fp->addrs[i], fp->addrs[i] + size - 1);
 		if (!addr) /* No dynamic ftrace there. */
 			goto error;
 		fp->addrs[i] = addr;
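
A rough, untested sketch of the arch-specific split suggested above; the
helper name fprobe_lookup_ftrace_addr() and its placement are made up for
illustration and are not part of the posted patch:

#include <linux/ftrace.h>
#include <linux/kallsyms.h>

/*
 * Generic weak default (e.g. in kernel/trace/fprobe.c): on archs where
 * the kallsyms address already is the real attach address, only validate
 * it as an ftrace location and skip the per-symbol range search.
 */
unsigned long __weak fprobe_lookup_ftrace_addr(unsigned long addr)
{
	return ftrace_location(addr);
}

/*
 * An arch like arm, where the kallsyms address does not match the attach
 * address, would override the weak default from its own arch code (a
 * separate object file) with the range search, keeping the "- 1" because
 * ftrace_location_range() does an inclusive range check.
 */
unsigned long fprobe_lookup_ftrace_addr(unsigned long addr)
{
	unsigned long size;

	if (!kallsyms_lookup_size_offset(addr, &size, NULL))
		size = MCOUNT_INSN_SIZE;

	return ftrace_location_range(addr, addr + size - 1);
}

convert_func_addresses() would then only call the helper for each entry and
bail out on a zero return, just as the posted loop already does after
ftrace_location_range().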