On Mon, Nov 20, 2023 at 03:54:18PM -0500, Mathieu Desnoyers wrote:
> diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
> index de753403cdaf..718a0723a0bc 100644
> --- a/kernel/trace/trace_syscalls.c
> +++ b/kernel/trace/trace_syscalls.c
> @@ -299,27 +299,33 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
>  	int syscall_nr;
>  	int size;
>
> +	/*
> +	 * Probe called with preemption enabled (may_fault), but ring buffer and
> +	 * per-cpu data require preemption to be disabled.
> +	 */
> +	preempt_disable_notrace();

	guard(preempt_notrace)();

and ditch all the goto crap.

> +
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
> -		return;
> +		goto end;
>
>  	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
>  	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
>  	if (!trace_file)
> -		return;
> +		goto end;
>
>  	if (trace_trigger_soft_disabled(trace_file))
> -		return;
> +		goto end;
>
>  	sys_data = syscall_nr_to_meta(syscall_nr);
>  	if (!sys_data)
> -		return;
> +		goto end;
>
>  	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
>
>  	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
>  	if (!entry)
> -		return;
> +		goto end;
>
>  	entry = ring_buffer_event_data(fbuffer.event);
>  	entry->nr = syscall_nr;
> @@ -327,6 +333,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
>  	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
>
>  	trace_event_buffer_commit(&fbuffer);
> +end:
> +	preempt_enable_notrace();
>  }
>
>  static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
> @@ -338,31 +346,39 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
>  	struct trace_event_buffer fbuffer;
>  	int syscall_nr;
>
> +	/*
> +	 * Probe called with preemption enabled (may_fault), but ring buffer and
> +	 * per-cpu data require preemption to be disabled.
> +	 */
> +	preempt_disable_notrace();

Idem.
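
IOW, something like the below for ftrace_syscall_enter(), and the exact same
shape for ftrace_syscall_exit(). Untested sketch; it assumes the
preempt_notrace scope guard (DEFINE_LOCK_GUARD_0(preempt_notrace, ...) in
<linux/preempt.h>) is usable here:

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	/* ... declarations unchanged ... */

	/*
	 * Probe called with preemption enabled (may_fault), but ring buffer
	 * and per-cpu data require preemption to be disabled. The scope
	 * guard runs preempt_enable_notrace() on every return path, so no
	 * goto and no end: label.
	 */
	guard(preempt_notrace)();

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
	if (!entry)
		return;

	/* ... fill the entry and commit as before, plain returns throughout ... */
}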
> +
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
> -		return;
> +		goto end;
>
>  	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
>  	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
>  	if (!trace_file)
> -		return;
> +		goto end;
>
>  	if (trace_trigger_soft_disabled(trace_file))
> -		return;
> +		goto end;
>
>  	sys_data = syscall_nr_to_meta(syscall_nr);
>  	if (!sys_data)
> -		return;
> +		goto end;
>
>  	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
>  	if (!entry)
> -		return;
> +		goto end;
>
>  	entry = ring_buffer_event_data(fbuffer.event);
>  	entry->nr = syscall_nr;
>  	entry->ret = syscall_get_return_value(current, regs);
>
>  	trace_event_buffer_commit(&fbuffer);
> +end:
> +	preempt_enable_notrace();
>  }
>
>  static int reg_event_syscall_enter(struct trace_event_file *file,
> @@ -377,7 +393,9 @@ static int reg_event_syscall_enter(struct trace_event_file *file,
>  		return -ENOSYS;
>  	mutex_lock(&syscall_trace_lock);
>  	if (!tr->sys_refcount_enter)
> -		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
> +		ret = register_trace_prio_flags_sys_enter(ftrace_syscall_enter, tr,
> +							  TRACEPOINT_DEFAULT_PRIO,
> +							  TRACEPOINT_MAY_FAULT);
>  	if (!ret) {
>  		rcu_assign_pointer(tr->enter_syscall_files[num], file);
>  		tr->sys_refcount_enter++;
> @@ -415,7 +433,9 @@ static int reg_event_syscall_exit(struct trace_event_file *file,
>  		return -ENOSYS;
>  	mutex_lock(&syscall_trace_lock);
>  	if (!tr->sys_refcount_exit)
> -		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
> +		ret = register_trace_prio_flags_sys_exit(ftrace_syscall_exit, tr,
> +							 TRACEPOINT_DEFAULT_PRIO,
> +							 TRACEPOINT_MAY_FAULT);
>  	if (!ret) {
>  		rcu_assign_pointer(tr->exit_syscall_files[num], file);
>  		tr->sys_refcount_exit++;

/me hands you a bucket of {}, free of charge.

> @@ -582,20 +602,26 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
>  	int rctx;
>  	int size;
>
> +	/*
> +	 * Probe called with preemption enabled (may_fault), but ring buffer and
> +	 * per-cpu data require preemption to be disabled.
> +	 */
> +	preempt_disable_notrace();

Again, guard(preempt_notrace)();

> +
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
> -		return;
> +		goto end;
>  	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
> -		return;
> +		goto end;
>
>  	sys_data = syscall_nr_to_meta(syscall_nr);
>  	if (!sys_data)
> -		return;
> +		goto end;
>
>  	head = this_cpu_ptr(sys_data->enter_event->perf_events);
>  	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
>  	if (!valid_prog_array && hlist_empty(head))
> -		return;
> +		goto end;
>
>  	/* get the size after alignment with the u32 buffer size field */
>  	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
> @@ -604,7 +630,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
>
>  	rec = perf_trace_buf_alloc(size, NULL, &rctx);
>  	if (!rec)
> -		return;
> +		goto end;
>
>  	rec->nr = syscall_nr;
>  	syscall_get_arguments(current, regs, args);
> @@ -614,12 +640,14 @@
>  	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
>  	    hlist_empty(head)) {
>  		perf_swevent_put_recursion_context(rctx);
> -		return;
> +		goto end;
>  	}
>
>  	perf_trace_buf_submit(rec, size, rctx,
>  			      sys_data->enter_event->event.type, 1, regs,
>  			      head, NULL);
> +end:
> +	preempt_enable_notrace();
>  }
>
>  static int perf_sysenter_enable(struct trace_event_call *call)
> @@ -631,7 +659,9 @@ static int perf_sysenter_enable(struct trace_event_call *call)
>
>  	mutex_lock(&syscall_trace_lock);
>  	if (!sys_perf_refcount_enter)
> -		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
> +		ret = register_trace_prio_flags_sys_enter(perf_syscall_enter, NULL,
> +							  TRACEPOINT_DEFAULT_PRIO,
> +							  TRACEPOINT_MAY_FAULT);

More {}

>  	if (ret) {
>  		pr_info("event trace: Could not activate syscall entry trace point");
>  	} else {
> @@ -682,20 +712,26 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
>  	int rctx;
>  	int size;
>
> +	/*
> +	 * Probe called with preemption enabled (may_fault), but ring buffer and
> +	 * per-cpu data require preemption to be disabled.
> +	 */
> +	preempt_disable_notrace();

Guess?
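
Both perf handlers end up with the same shape; untested sketch for the enter
side (same preempt_notrace guard assumption), and the exit side is identical
modulo the *_exit names. Note the early bail-out after
perf_swevent_put_recursion_context() also stays a plain return:

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	/* ... declarations unchanged ... */

	/* Preemption is re-enabled on every return; no end: label needed. */
	guard(preempt_notrace)();

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	/* ... sys_data / head / rec setup as before, plain returns ... */

	if ((valid_prog_array &&
	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}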
> +
>  	syscall_nr = trace_get_syscall_nr(current, regs);
>  	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
> -		return;
> +		goto end;
>  	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
> -		return;
> +		goto end;
>
>  	sys_data = syscall_nr_to_meta(syscall_nr);
>  	if (!sys_data)
> -		return;
> +		goto end;
>
>  	head = this_cpu_ptr(sys_data->exit_event->perf_events);
>  	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
>  	if (!valid_prog_array && hlist_empty(head))
> -		return;
> +		goto end;
>
>  	/* We can probably do that at build time */
>  	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
> @@ -703,7 +739,7 @@
>
>  	rec = perf_trace_buf_alloc(size, NULL, &rctx);
>  	if (!rec)
> -		return;
> +		goto end;
>
>  	rec->nr = syscall_nr;
>  	rec->ret = syscall_get_return_value(current, regs);
> @@ -712,11 +748,13 @@
>  	     !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
>  	    hlist_empty(head)) {
>  		perf_swevent_put_recursion_context(rctx);
> -		return;
> +		goto end;
>  	}
>
>  	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
>  			      1, regs, head, NULL);
> +end:
> +	preempt_enable_notrace();
>  }
>
>  static int perf_sysexit_enable(struct trace_event_call *call)
> @@ -728,7 +766,9 @@ static int perf_sysexit_enable(struct trace_event_call *call)
>
>  	mutex_lock(&syscall_trace_lock);
>  	if (!sys_perf_refcount_exit)
> -		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
> +		ret = register_trace_prio_flags_sys_exit(perf_syscall_exit, NULL,
> +							 TRACEPOINT_DEFAULT_PRIO,
> +							 TRACEPOINT_MAY_FAULT);

And yet more {}

>  	if (ret) {
>  		pr_info("event trace: Could not activate syscall exit trace point");
>  	} else {
> --
> 2.25.1
>
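
To spell out the bucket of {}: once a branch body spans more than one line it
should grow braces, so all four register_trace_prio_flags_* call sites want
something like (untested, shown for the last one):

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit) {
		/* multi-line body -> braces; same for the other three sites */
		ret = register_trace_prio_flags_sys_exit(perf_syscall_exit, NULL,
							 TRACEPOINT_DEFAULT_PRIO,
							 TRACEPOINT_MAY_FAULT);
	}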