This is the arm64 version of ftrace-based kprobes: it uses the ftrace
infrastructure to place probes at function entry, avoiding the overhead
of regular (trap-based) kprobes.

Signed-off-by: Jianlin Lv <iecedge@xxxxxxxxx>
---
 .../debug/kprobes-on-ftrace/arch-support.txt |  2 +-
 arch/arm64/Kconfig                           |  1 +
 arch/arm64/kernel/probes/Makefile            |  1 +
 arch/arm64/kernel/probes/kprobes-ftrace.c    | 81 +++++++++++++++++++
 include/linux/kprobes.h                      |  2 +
 kernel/kprobes.c                             |  4 +-
 6 files changed, 88 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/kernel/probes/kprobes-ftrace.c

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index b3697f4c806e..fadbd4513c91 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |        csky: |  ok  |
     |     hexagon: | TODO |
     |        ia64: | TODO |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1652a9800ebe..7271d97e035a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -204,6 +204,7 @@ config ARM64
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_GENERIC_VDSO
 	select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..dab2fa4dba81 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
 				   simulate-insn.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o	\
 				   simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= kprobes-ftrace.o
diff --git a/arch/arm64/kernel/probes/kprobes-ftrace.c b/arch/arm64/kernel/probes/kprobes-ftrace.c
new file mode 100644
index 000000000000..fcfa6b6533a0
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes-ftrace.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	unsigned long addr = (unsigned long)p->addr;
+
+	/* ftrace location at the BL in the callsite (<function-entry> + AARCH64_INSN_SIZE) */
+	if (ftrace_location(addr) == (addr + AARCH64_INSN_SIZE)) {
+#ifdef CONFIG_KPROBES_ON_FTRACE
+		p->flags |= KPROBE_FLAG_FTRACE;
+#else	/* !CONFIG_KPROBES_ON_FTRACE */
+		return -EINVAL;
+#endif
+	}
+	return 0;
+}
+
+/* Ftrace callback handler for kprobes */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+	struct pt_regs *regs;
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	int bit;
+
+	bit = ftrace_test_recursion_trylock(ip, parent_ip);
+	if (bit < 0)
+		return;
+
+	regs = ftrace_get_regs(fregs);
+	p = get_kprobe((kprobe_opcode_t *)(ip - AARCH64_INSN_SIZE));
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto out;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+
+		instruction_pointer_set(regs, ip - AARCH64_INSN_SIZE);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate singlestep (and also recover regs->pc)
+			 * as if there is a 4-byte nop
+			 */
+			instruction_pointer_set(regs, ip);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+
+		/*
+		 * If pre_handler returns !0, it changes regs->pc. We have to
+		 * skip emulating post_handler.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
+	}
+out:
+	ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.api.insn = NULL;
+	return 0;
+}
+
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 55041d2f884d..83a479908029 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -392,6 +392,8 @@ static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
 }
 #endif /* CONFIG_KPROBES_ON_FTRACE */
 
+int arch_check_ftrace_location(struct kprobe *p);
+
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f214f8c088ed..34754c2fe14e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1534,7 +1534,7 @@ static inline int warn_kprobe_rereg(struct kprobe *p)
 	return ret;
 }
 
-static int check_ftrace_location(struct kprobe *p)
+int __weak arch_check_ftrace_location(struct kprobe *p)
 {
 	unsigned long addr = (unsigned long)p->addr;
 
@@ -1553,7 +1553,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 {
 	int ret;
 
-	ret = check_ftrace_location(p);
+	ret = arch_check_ftrace_location(p);
 	if (ret)
 		return ret;
 	jump_label_lock();
-- 
2.25.1
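
A quick way to exercise the new path (not part of the patch): the minimal
test module below registers a kprobe at a function entry and reports
whether the probe ended up ftrace-based. It is only a sketch against the
generic kprobes API; the probed symbol (kernel_clone) and the module name
are arbitrary examples, and it assumes a kernel built with
CONFIG_KPROBES_ON_FTRACE=y whose function entries are ftrace call sites.

#include <linux/module.h>
#include <linux/kprobes.h>

/* Arbitrary example symbol; any ftrace-patchable function entry works. */
static struct kprobe kp = {
	.symbol_name = "kernel_clone",
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre:  pc = %pS\n", (void *)instruction_pointer(regs));
	return 0;	/* 0: continue with the (emulated) single-step */
}

static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	pr_info("post: pc = %pS\n", (void *)instruction_pointer(regs));
}

static int __init kprobe_ftrace_test_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	kp.post_handler = handler_post;

	ret = register_kprobe(&kp);
	if (ret < 0)
		return ret;

	/* With this patch applied, probes on ftrace sites carry the flag. */
	pr_info("kprobe at %p, ftrace-based: %s\n", kp.addr,
		(kp.flags & KPROBE_FLAG_FTRACE) ? "yes" : "no");
	return 0;
}

static void __exit kprobe_ftrace_test_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kprobe_ftrace_test_init);
module_exit(kprobe_ftrace_test_exit);
MODULE_LICENSE("GPL");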