On 08/20/2022 01:25 AM, Steven Rostedt wrote:
On Fri, 19 Aug 2022 16:13:55 +0800
Qing Zhang <zhangqing@xxxxxxxxxxx> wrote:
+#define MCOUNT_STACK_SIZE (2 * SZREG)
+#define MCOUNT_S0_OFFSET (0)
+#define MCOUNT_RA_OFFSET (SZREG)
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_ADDI sp, sp, -MCOUNT_STACK_SIZE
+ PTR_S s0, sp, MCOUNT_S0_OFFSET
+ PTR_S ra, sp, MCOUNT_RA_OFFSET
+ move s0, a0 /* a0: parent's return address, set up by the compiler */
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ move a0, s0
+ PTR_L ra, sp, MCOUNT_RA_OFFSET
+ PTR_L s0, sp, MCOUNT_S0_OFFSET
+ PTR_ADDI sp, sp, MCOUNT_STACK_SIZE
+ .endm
+
+SYM_FUNC_START(_mcount)
+ la t1, ftrace_stub
+ la t2, ftrace_trace_function /* Prepare t2 for (1) */
+ PTR_L t2, t2, 0
+ beq t1, t2, fgraph_trace
+
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg0: self return address */
+ move a1, s0 /* arg1: parent's return address */
+ jirl ra, t2, 0 /* (1) call *ftrace_trace_function */
+
+ MCOUNT_RESTORE_REGS
You know, if you can implement CONFIG_FTRACE_WITH_ARGS, where the default
function callback gets a ftrace_regs pointer (that only holds what is
needed for the arguments of the function as well as the stack pointer),
then you could also implement function graph on top of that, and remove the
need for the below "fgraph_trace" trampoline.
I'd really like all architectures to go that way. Also,
CONFIG_FTRACE_WITH_ARGS is all you need for live kernel patching.
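For reference, such a callback only sees a struct ftrace_regs and pulls
what it needs through the generic accessors, roughly like this (a
minimal sketch; helper names follow include/linux/ftrace.h, but treat
the details as illustrative):

	#include <linux/ftrace.h>

	/* Minimal sketch of a function callback that only needs the
	 * argument/stack state carried by ftrace_regs, not a full
	 * pt_regs.
	 */
	static void sample_callback(unsigned long ip,
				    unsigned long parent_ip,
				    struct ftrace_ops *op,
				    struct ftrace_regs *fregs)
	{
		/* first argument register of the traced function */
		unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);
		/* stack pointer at the traced function's entry */
		unsigned long sp = ftrace_regs_get_stack_pointer(fregs);

		trace_printk("%pS: arg0=%lx sp=%lx\n", (void *)ip, arg0, sp);
	}

	static struct ftrace_ops sample_ops = {
		.func = sample_callback,
	};

It would then be registered with register_ftrace_function(&sample_ops).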
Hi, Steve,
I think we have implemented CONFIG_FTRACE_WITH_ARGS for dynamic ftrace
in [Patch 3/9]. But for non-dynamic ftrace it is hard to implement,
because gcc on LoongArch treats _mcount as a real call, like
'call _mcount(__builtin_return_address(0))'. That means the compiler
decrements the stack, saves arguments to callee-saved registers, and
may do some optimization before calling _mcount. It is difficult to
find the original arguments and apply changes from tracers.
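For example, under -pg each instrumented function behaves roughly as if
it were written like this (an illustrative sketch, not actual compiler
output):

	/* Illustrative sketch: under -pg, gcc emits the _mcount call
	 * after the prologue, passing the parent's return address
	 * explicitly in a0.
	 */
	extern void _mcount(void *parent_ra);

	void foo(int x)
	{
		/* prologue already ran: sp adjusted, x possibly moved
		 * into a callee-saved register */
		_mcount(__builtin_return_address(0));	/* a0 = parent ra */

		/* ... original function body ... */
	}

So by the time _mcount runs, the argument registers no longer reliably
hold the function's original arguments.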
Thanks,
Jinyang
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ la t1, ftrace_stub
+ la t3, ftrace_graph_return
+ PTR_L t3, t3, 0
+ bne t1, t3, ftrace_graph_caller
+ la t1, ftrace_graph_entry_stub
+ la t3, ftrace_graph_entry
+ PTR_L t3, t3, 0
+ bne t1, t3, ftrace_graph_caller
+#endif
+
+ .globl ftrace_stub
+ftrace_stub:
+ jirl zero, ra, 0
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_FUNC_START(ftrace_graph_caller)
+ MCOUNT_SAVE_REGS
+
+ PTR_ADDI a0, ra, -4 /* arg0: Callsite self return addr */
+ PTR_ADDI a1, sp, MCOUNT_STACK_SIZE /* arg1: Callsite sp */
+ move a2, s0 /* arg2: Callsite parent ra */
+ bl prepare_ftrace_return
+
+ MCOUNT_RESTORE_REGS
+ jirl zero, ra, 0
+SYM_FUNC_END(ftrace_graph_caller)