The context_tracking.state RCU_DYNTICKS subvariable has been renamed to
RCU_WATCHING, and the 'dynticks' prefix can be dropped without losing any
meaning.

Suggested-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Signed-off-by: Valentin Schneider <vschneid@xxxxxxxxxx>
---
 kernel/context_tracking.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 647939b0befd9..9c8f7b9191cd4 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(context_tracking);
 #define TPS(x) tracepoint_string(x)
 
 /* Record the current task on dyntick-idle entry. */
-static __always_inline void rcu_dynticks_task_enter(void)
+static __always_inline void rcu_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
 	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
@@ -47,7 +47,7 @@ static __always_inline void rcu_dynticks_task_enter(void)
 }
 
 /* Record no current task on dyntick-idle exit. */
-static __always_inline void rcu_dynticks_task_exit(void)
+static __always_inline void rcu_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
@@ -55,7 +55,7 @@ static __always_inline void rcu_dynticks_task_exit(void)
 }
 
 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
-static __always_inline void rcu_dynticks_task_trace_enter(void)
+static __always_inline void rcu_task_trace_enter(void)
 {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -64,7 +64,7 @@ static __always_inline void rcu_dynticks_task_trace_enter(void)
 }
 
 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
-static __always_inline void rcu_dynticks_task_trace_exit(void)
+static __always_inline void rcu_task_trace_exit(void)
 {
 #ifdef CONFIG_TASKS_TRACE_RCU
 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
@@ -87,7 +87,7 @@ static noinstr void ct_kernel_exit_state(int offset)
 	 * critical sections, and we also must force ordering with the
 	 * next idle sojourn.
 	 */
-	rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
+	rcu_task_trace_enter();  // Before ->dynticks update!
 	seq = ct_state_inc(offset);
 	// RCU is no longer watching. Better be in extended quiescent state!
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_RCU_WATCHING));
@@ -109,7 +109,7 @@ static noinstr void ct_kernel_enter_state(int offset)
 	 */
 	seq = ct_state_inc(offset);
 	// RCU is now watching. Better not be in an extended quiescent state!
-	rcu_dynticks_task_trace_exit();  // After ->dynticks update!
+	rcu_task_trace_exit();  // After ->dynticks update!
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_RCU_WATCHING));
 }
 
@@ -149,7 +149,7 @@ static void noinstr ct_kernel_exit(bool user, int offset)
 	// RCU is watching here ...
 	ct_kernel_exit_state(offset);
 	// ... but is no longer watching here.
-	rcu_dynticks_task_enter();
+	rcu_task_enter();
 }
 
 /*
@@ -173,7 +173,7 @@ static void noinstr ct_kernel_enter(bool user, int offset)
 		ct->nesting++;
 		return;
 	}
-	rcu_dynticks_task_exit();
+	rcu_task_exit();
 	// RCU is not watching here ...
 	ct_kernel_enter_state(offset);
 	// ... but is watching here.
@@ -240,7 +240,7 @@ void noinstr ct_nmi_exit(void)
 	// ... but is no longer watching here.
 
 	if (!in_nmi())
-		rcu_dynticks_task_enter();
+		rcu_task_enter();
 }
 
 /**
@@ -274,7 +274,7 @@ void noinstr ct_nmi_enter(void)
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 
 		if (!in_nmi())
-			rcu_dynticks_task_exit();
+			rcu_task_exit();
 
 		// RCU is not watching here ...
 		ct_kernel_enter_state(CT_RCU_WATCHING);
-- 
2.43.0