Now that the flags migration in the common syscall entry is complete
and the code relies exclusively on syscall_work, clean up the accesses
to TI flags in that path.

Signed-off-by: Gabriel Krisman Bertazi <krisman@xxxxxxxxxxxxx>
---
Changes since v2:
  - Fix subsystem prefix (tglx)
---
 include/linux/entry-common.h | 20 +++++++++-----------
 kernel/entry/common.c        | 17 +++++++----------
 2 files changed, 16 insertions(+), 21 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index afeb927e8545..cffd8bf1e085 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -26,31 +26,29 @@
 #endif
 
 /*
- * TIF flags handled in syscall_enter_from_user_mode()
+ * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
  */
-#ifndef ARCH_SYSCALL_ENTER_WORK
-# define ARCH_SYSCALL_ENTER_WORK	(0)
+#ifndef ARCH_SYSCALL_WORK_ENTER
+# define ARCH_SYSCALL_WORK_ENTER	(0)
 #endif
 
-#define SYSCALL_ENTER_WORK ARCH_SYSCALL_ENTER_WORK
-
 /*
  * TIF flags handled in syscall_exit_to_user_mode()
  */
-#ifndef ARCH_SYSCALL_EXIT_WORK
-# define ARCH_SYSCALL_EXIT_WORK		(0)
+#ifndef ARCH_SYSCALL_WORK_EXIT
+# define ARCH_SYSCALL_WORK_EXIT	(0)
 #endif
 
-#define SYSCALL_EXIT_WORK ARCH_SYSCALL_EXIT_WORK
-
 #define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
 				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
 				 SYSCALL_WORK_SYSCALL_TRACE |		\
 				 SYSCALL_WORK_SYSCALL_EMU |		\
-				 SYSCALL_WORK_SYSCALL_AUDIT)
+				 SYSCALL_WORK_SYSCALL_AUDIT |		\
+				 ARCH_SYSCALL_WORK_ENTER)
 #define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
 				 SYSCALL_WORK_SYSCALL_TRACE |		\
-				 SYSCALL_WORK_SYSCALL_AUDIT)
+				 SYSCALL_WORK_SYSCALL_AUDIT |		\
+				 ARCH_SYSCALL_WORK_EXIT)
 
 /*
  * TIF flags handled in exit_to_user_mode_loop()
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index a7233cca01ba..61b6936a0623 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -42,7 +42,7 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
 }
 
 static long syscall_trace_enter(struct pt_regs *regs, long syscall,
-				unsigned long ti_work, unsigned long work)
+				unsigned long work)
 {
 	long ret = 0;
 
@@ -75,11 +75,9 @@ static __always_inline long
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
 	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-	unsigned long ti_work;
 
-	ti_work = READ_ONCE(current_thread_info()->flags);
-	if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
-		syscall = syscall_trace_enter(regs, syscall, ti_work, work);
+	if (work & SYSCALL_WORK_ENTER)
+		syscall = syscall_trace_enter(regs, syscall, work);
 
 	return syscall;
 }
@@ -227,8 +225,8 @@ static inline bool report_single_step(unsigned long work)
 }
 #endif
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work,
-			      unsigned long work)
+
+static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
 {
 	bool step;
 
@@ -249,7 +247,6 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 {
 	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
-	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
 	unsigned long nr = syscall_get_nr(current, regs);
 
 	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
 
@@ -266,8 +263,8 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 	 * enabled, we want to run them exactly once per syscall exit with
 	 * interrupts enabled.
 	 */
-	if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
-		syscall_exit_work(regs, cached_flags, work);
+	if (unlikely(work & SYSCALL_WORK_EXIT))
+		syscall_exit_work(regs, work);
 }
 
 __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
-- 
2.29.2
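
For readers following the series, here is a small standalone sketch (not part
of the patch) of the check pattern the generic entry code is left with after
this cleanup: the work mask is read once from thread_info::syscall_work and
tested against SYSCALL_WORK_* masks, with no second read of the TIF word. The
stub struct, bit values, and printf markers below are illustrative stand-ins
for the real kernel definitions, not kernel code.

	/* Standalone illustration; names mirror the series, values are made up. */
	#include <stdio.h>

	#define SYSCALL_WORK_SECCOMP		(1UL << 0)
	#define SYSCALL_WORK_SYSCALL_TRACE	(1UL << 1)
	#define SYSCALL_WORK_ENTER		(SYSCALL_WORK_SECCOMP | \
						 SYSCALL_WORK_SYSCALL_TRACE)

	struct thread_info {
		unsigned long syscall_work;	/* the only flag word consulted now */
	};

	static struct thread_info ti = { .syscall_work = SYSCALL_WORK_SECCOMP };

	static long syscall_trace_enter(long syscall, unsigned long work)
	{
		/* Each subsystem tests its own SYSCALL_WORK_* bit. */
		if (work & SYSCALL_WORK_SECCOMP)
			printf("seccomp filter for syscall %ld\n", syscall);
		if (work & SYSCALL_WORK_SYSCALL_TRACE)
			printf("ptrace report for syscall %ld\n", syscall);
		return syscall;
	}

	static long syscall_enter_from_user_work(long syscall)
	{
		/* One read of syscall_work; no second read of TIF flags. */
		unsigned long work = ti.syscall_work;

		if (work & SYSCALL_WORK_ENTER)
			syscall = syscall_trace_enter(syscall, work);
		return syscall;
	}

	int main(void)
	{
		syscall_enter_from_user_work(42);
		return 0;
	}

The point of the cleanup is visible in syscall_enter_from_user_work(): the
old ti_work/SYSCALL_ENTER_WORK leg of the condition is gone entirely, so the
fast path is a single masked test on one word.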