Move unwind() to stacktrace/common.h and, as a result, move the kernel
unwind_next() to asm/stacktrace.h. This allows reusing unwind() in the
implementation of the nVHE HYP stack unwinder, later in the series.

Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---
 arch/arm64/include/asm/stacktrace.h        | 51 ++++++++++++++++
 arch/arm64/include/asm/stacktrace/common.h | 19 ++++++
 arch/arm64/kernel/stacktrace.c             | 67 ----------------------
 3 files changed, 70 insertions(+), 67 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index a4f8b84fb459..4fa07f0f913d 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -11,6 +11,7 @@
 #include <linux/llist.h>
 
 #include <asm/memory.h>
+#include <asm/pointer_auth.h>
 #include <asm/ptrace.h>
 #include <asm/sdei.h>
 
@@ -78,4 +79,54 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
 	return false;
 }
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
+static inline int notrace unwind_next(struct unwind_state *state)
+{
+	struct task_struct *tsk = state->task;
+	unsigned long fp = state->fp;
+	struct stack_info info;
+	int err;
+
+	/* Final frame; nothing to unwind */
+	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+		return -ENOENT;
+
+	err = unwind_next_common(state, &info, NULL);
+	if (err)
+		return err;
+
+	state->pc = ptrauth_strip_insn_pac(state->pc);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (tsk->ret_stack &&
+	    (state->pc == (unsigned long)return_to_handler)) {
+		unsigned long orig_pc;
+		/*
+		 * This is a case where function graph tracer has
+		 * modified a return address (LR) in a stack frame
+		 * to hook a function return.
+		 * So replace it to an original value.
+		 */
+		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
+						(void *)state->fp);
+		if (WARN_ON_ONCE(state->pc == orig_pc))
+			return -EINVAL;
+		state->pc = orig_pc;
+	}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifdef CONFIG_KRETPROBES
+	if (is_kretprobe_trampoline(state->pc))
+		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
+#endif
+
+	return 0;
+}
+NOKPROBE_SYMBOL(unwind_next);
 
 #endif	/* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 5f5d74a286f3..f86efe71479d 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -9,6 +9,7 @@
 
 #include <linux/bitmap.h>
 #include <linux/bitops.h>
+#include <linux/kprobes.h>
 #include <linux/types.h>
 
 enum stack_type {
@@ -69,6 +70,8 @@ static inline bool on_accessible_stack(const struct task_struct *tsk,
 				       unsigned long sp, unsigned long size,
 				       struct stack_info *info);
 
+static inline int unwind_next(struct unwind_state *state);
+
 static inline bool on_stack(unsigned long sp, unsigned long size,
 			    unsigned long low, unsigned long high,
 			    enum stack_type type, struct stack_info *info)
@@ -192,4 +195,20 @@ static inline int unwind_next_common(struct unwind_state *state,
 
 	return 0;
 }
+
+static inline void notrace unwind(struct unwind_state *state,
+				  stack_trace_consume_fn consume_entry,
+				  void *cookie)
+{
+	while (1) {
+		int ret;
+
+		if (!consume_entry(cookie, state->pc))
+			break;
+		ret = unwind_next(state);
+		if (ret < 0)
+			break;
+	}
+}
+NOKPROBE_SYMBOL(unwind);
 #endif	/* __ASM_STACKTRACE_COMMON_H */
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index eef3cf6bf2d7..9fa60ee48499 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,14 +7,12 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
-#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
 #include <asm/irq.h>
-#include <asm/pointer_auth.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
 
@@ -69,71 +67,6 @@ static inline void unwind_init_from_task(struct unwind_state *state,
 	state->pc = thread_saved_pc(task);
 }
 
-/*
- * Unwind from one frame record (A) to the next frame record (B).
- *
- * We terminate early if the location of B indicates a malformed chain of frame
- * records (e.g. a cycle), determined based on the location and fp value of A
- * and the location (but not the fp value) of B.
- */
-static int notrace unwind_next(struct unwind_state *state)
-{
-	struct task_struct *tsk = state->task;
-	unsigned long fp = state->fp;
-	struct stack_info info;
-	int err;
-
-	/* Final frame; nothing to unwind */
-	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
-		return -ENOENT;
-
-	err = unwind_next_common(state, &info, NULL);
-	if (err)
-		return err;
-
-	state->pc = ptrauth_strip_insn_pac(state->pc);
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	if (tsk->ret_stack &&
-	    (state->pc == (unsigned long)return_to_handler)) {
-		unsigned long orig_pc;
-		/*
-		 * This is a case where function graph tracer has
-		 * modified a return address (LR) in a stack frame
-		 * to hook a function return.
-		 * So replace it to an original value.
-		 */
-		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
-						(void *)state->fp);
-		if (WARN_ON_ONCE(state->pc == orig_pc))
-			return -EINVAL;
-		state->pc = orig_pc;
-	}
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef CONFIG_KRETPROBES
-	if (is_kretprobe_trampoline(state->pc))
-		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
-#endif
-
-	return 0;
-}
-NOKPROBE_SYMBOL(unwind_next);
-
-static void notrace unwind(struct unwind_state *state,
-			   stack_trace_consume_fn consume_entry, void *cookie)
-{
-	while (1) {
-		int ret;
-
-		if (!consume_entry(cookie, state->pc))
-			break;
-		ret = unwind_next(state);
-		if (ret < 0)
-			break;
-	}
-}
-NOKPROBE_SYMBOL(unwind);
-
 static bool dump_backtrace_entry(void *arg, unsigned long where)
 {
 	char *loglvl = arg;
-- 
2.37.0.170.g444d1eabd0-goog
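
As a usage illustration (not part of the patch), a caller would drive the
relocated unwind() roughly as in the sketch below. example_consume_entry(),
example_dump_task() and the message format are hypothetical names for this
example only; unwind_init_from_task() is the existing initializer that stays
in arch/arm64/kernel/stacktrace.c, and stack_trace_consume_fn is the generic
callback type from <linux/stacktrace.h>:

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

#include <asm/stacktrace.h>

/* Hypothetical consumer: print each return address; return false to stop. */
static bool example_consume_entry(void *cookie, unsigned long where)
{
	printk("%s [<%016lx>]\n", (char *)cookie, where);
	return true;
}

/* Hypothetical wrapper: seed the unwind state from a task, then walk it. */
static void example_dump_task(struct task_struct *tsk)
{
	struct unwind_state state;

	unwind_init_from_task(&state, tsk);	/* fp/pc from the saved context */
	unwind(&state, example_consume_entry, "backtrace:");
}

The loop in unwind() keeps calling unwind_next() until either the consume
callback returns false or unwind_next() reports the final frame or an error.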