This is a note to let you know that I've just added the patch titled

    s390/stacktrace: Merge perf_callchain_user() and arch_stack_walk_user()

to the 6.9-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     s390-stacktrace-merge-perf_callchain_user-and-arch_s.patch
and it can be found in the queue-6.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.

commit 020d2c2280f8847050b77792caaf796bc0de8d4b
Author: Heiko Carstens <hca@xxxxxxxxxxxxx>
Date:   Mon Apr 29 14:28:44 2024 +0200

    s390/stacktrace: Merge perf_callchain_user() and arch_stack_walk_user()

    [ Upstream commit ebd912ff9919a10609511383d94942362234c077 ]

    The two functions perf_callchain_user() and arch_stack_walk_user() are
    nearly identical. Reduce code duplication and add a common helper which
    can be called by both functions.

    Fixes: aa44433ac4ee ("s390: add USER_STACKTRACE support")
    Reviewed-by: Jens Remus <jremus@xxxxxxxxxxxxx>
    Signed-off-by: Heiko Carstens <hca@xxxxxxxxxxxxx>
    Signed-off-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b14ea..4aefbe32265d8 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
 #ifndef _ASM_S390_STACKTRACE_H
 #define _ASM_S390_STACKTRACE_H
 
+#include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 
@@ -12,6 +13,12 @@ struct stack_frame_user {
 	unsigned long empty2[4];
 };
 
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf);
+
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2fd2ec..5fff629b1a898 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 			 struct pt_regs *regs)
 {
-	struct stack_frame_user __user *sf;
-	unsigned long ip, sp;
-	bool first = true;
-
-	if (is_compat_task())
-		return;
-	perf_callchain_store(entry, instruction_pointer(regs));
-	sf = (void __user *)user_stack_pointer(regs);
-	pagefault_disable();
-	while (entry->nr < entry->max_stack) {
-		if (__get_user(sp, &sf->back_chain))
-			break;
-		if (__get_user(ip, &sf->gprs[8]))
-			break;
-		if (ip & 0x1) {
-			/*
-			 * If the instruction address is invalid, and this
-			 * is the first stack frame, assume r14 has not
-			 * been written to the stack yet. Otherwise exit.
-			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
-				break;
-		}
-		perf_callchain_store(entry, ip);
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		first = false;
-	}
-	pagefault_enable();
+	arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 }
 
 /* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 7c294da45bf52..e580d4cd2729a 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
  * Copyright IBM Corp. 2006
  */
 
+#include <linux/perf_event.h>
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
@@ -62,8 +63,23 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	return 0;
 }
 
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
-			  const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+			    struct perf_callchain_entry_ctx *entry, bool perf,
+			    unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+	if (perf) {
+		if (perf_callchain_store(entry, ip))
+			return false;
+		return true;
+	}
+#endif
+	return consume_entry(cookie, ip);
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf)
 {
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
@@ -71,7 +87,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 
 	if (is_compat_task())
 		return;
-	if (!consume_entry(cookie, instruction_pointer(regs)))
+	ip = instruction_pointer(regs);
+	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
 	sf = (void __user *)user_stack_pointer(regs);
 	pagefault_disable();
@@ -91,8 +108,8 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 			else
 				break;
 		}
-		if (!consume_entry(cookie, ip))
-			break;
+		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+			return;
 		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
 		if (!sp || sp & 0x7)
 			break;
@@ -102,6 +119,12 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
 	pagefault_enable();
 }
 
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
 unsigned long return_address(unsigned int n)
 {
 	struct unwind_state state;
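
For readers who want the shape of the change without the surrounding kernel
plumbing, here is a small, self-contained user-space sketch of the pattern the
patch introduces: one common walker parameterized by a perf flag, with thin
wrappers standing in for perf_callchain_user() and arch_stack_walk_user().
All identifiers in the sketch (walk_common, store_entry, callchain_store,
stack_walk_user, perf_callchain_user_demo) are hypothetical stand-ins, not the
kernel API; the authoritative implementation is the diff above.

/*
 * Illustrative user-space analogue only -- NOT kernel code. All names below
 * are hypothetical; they mirror the roles of store_ip() and
 * arch_stack_walk_user_common() from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ENTRIES 16

typedef bool (*consume_fn)(void *cookie, unsigned long ip);

struct callchain_ctx {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr;
};

/* Rough analogue of perf_callchain_store(): false once the buffer is full. */
static bool callchain_store(struct callchain_ctx *ctx, unsigned long ip)
{
	if (ctx->nr >= MAX_ENTRIES)
		return false;
	ctx->entries[ctx->nr++] = ip;
	return true;
}

/* Analogue of store_ip(): pick whichever sink the caller selected. */
static bool store_entry(consume_fn consume, void *cookie,
			struct callchain_ctx *ctx, bool perf, unsigned long ip)
{
	if (perf)
		return callchain_store(ctx, ip);
	return consume(cookie, ip);
}

/* Analogue of arch_stack_walk_user_common(): one loop serves both callers. */
static void walk_common(consume_fn consume, void *cookie,
			struct callchain_ctx *ctx,
			const unsigned long *frames, unsigned int nframes,
			bool perf)
{
	for (unsigned int i = 0; i < nframes; i++) {
		if (!store_entry(consume, cookie, ctx, perf, frames[i]))
			return;
	}
}

/* Thin wrapper, like arch_stack_walk_user() after the patch. */
static void stack_walk_user(consume_fn consume, void *cookie,
			    const unsigned long *frames, unsigned int nframes)
{
	walk_common(consume, cookie, NULL, frames, nframes, false);
}

/* Thin wrapper, like perf_callchain_user() after the patch. */
static void perf_callchain_user_demo(struct callchain_ctx *ctx,
				     const unsigned long *frames,
				     unsigned int nframes)
{
	walk_common(NULL, NULL, ctx, frames, nframes, true);
}

static bool print_entry(void *cookie, unsigned long ip)
{
	(void)cookie;
	printf("ip: %#lx\n", ip);
	return true;
}

int main(void)
{
	unsigned long frames[] = { 0x1000, 0x2000, 0x3000 };
	struct callchain_ctx ctx = { .nr = 0 };

	stack_walk_user(print_entry, NULL, frames, 3);
	perf_callchain_user_demo(&ctx, frames, 3);
	printf("perf path captured %u entries\n", ctx.nr);
	return 0;
}

The point of the flag is that the frame-reading loop exists exactly once:
the perf entry point passes a callchain context and no callback, while the
generic stacktrace entry point passes a consume callback and no context.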