Re: [PATCH v8 1/4] sched: Define sched_clock_irqtime as static key

On Fri, 3 Jan 2025 at 03:24, Yafang Shao <laoar.shao@xxxxxxxxx> wrote:
>
> Since CPU time accounting is a performance-critical path, let's define
> sched_clock_irqtime as a static key to minimize potential overhead.
>
> Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
> Reviewed-by: Michal Koutný <mkoutny@xxxxxxxx>

Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>


> ---
>  kernel/sched/cputime.c | 16 +++++++---------
>  kernel/sched/sched.h   | 13 +++++++++++++
>  2 files changed, 20 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> index 0bed0fa1acd9..5d9143dd0879 100644
> --- a/kernel/sched/cputime.c
> +++ b/kernel/sched/cputime.c
> @@ -9,6 +9,8 @@
>
>  #ifdef CONFIG_IRQ_TIME_ACCOUNTING
>
> +DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime);
> +
>  /*
>   * There are no locks covering percpu hardirq/softirq time.
>   * They are only modified in vtime_account, on corresponding CPU
> @@ -22,16 +24,14 @@
>   */
>  DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
>
> -static int sched_clock_irqtime;
> -
>  void enable_sched_clock_irqtime(void)
>  {
> -       sched_clock_irqtime = 1;
> +       static_branch_enable(&sched_clock_irqtime);
>  }
>
>  void disable_sched_clock_irqtime(void)
>  {
> -       sched_clock_irqtime = 0;
> +       static_branch_disable(&sched_clock_irqtime);
>  }
>
>  static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
> @@ -57,7 +57,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
>         s64 delta;
>         int cpu;
>
> -       if (!sched_clock_irqtime)
> +       if (!irqtime_enabled())
>                 return;
>
>         cpu = smp_processor_id();
> @@ -90,8 +90,6 @@ static u64 irqtime_tick_accounted(u64 maxtime)
>
>  #else /* CONFIG_IRQ_TIME_ACCOUNTING */
>
> -#define sched_clock_irqtime    (0)
> -
>  static u64 irqtime_tick_accounted(u64 dummy)
>  {
>         return 0;
> @@ -478,7 +476,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
>         if (vtime_accounting_enabled_this_cpu())
>                 return;
>
> -       if (sched_clock_irqtime) {
> +       if (irqtime_enabled()) {
>                 irqtime_account_process_tick(p, user_tick, 1);
>                 return;
>         }
> @@ -507,7 +505,7 @@ void account_idle_ticks(unsigned long ticks)
>  {
>         u64 cputime, steal;
>
> -       if (sched_clock_irqtime) {
> +       if (irqtime_enabled()) {
>                 irqtime_account_idle_ticks(ticks);
>                 return;
>         }
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index aef716c41edb..7e8c73110884 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -3233,6 +3233,12 @@ struct irqtime {
>  };
>
>  DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
> +DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime);
> +
> +static inline int irqtime_enabled(void)
> +{
> +       return static_branch_likely(&sched_clock_irqtime);
> +}
>
>  /*
>   * Returns the irqtime minus the softirq time computed by ksoftirqd.
> @@ -3253,6 +3259,13 @@ static inline u64 irq_time_read(int cpu)
>         return total;
>  }
>
> +#else
> +
> +static inline int irqtime_enabled(void)
> +{
> +       return 0;
> +}
> +
>  #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
>
>  #ifdef CONFIG_CPU_FREQ
> --
> 2.43.5
>
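
For reference, here is a minimal, self-contained sketch of the jump-label
pattern the patch relies on. The names (example_key, example_hot_path, ...)
are hypothetical and only for illustration; the point is that
static_branch_likely() is live-patched at runtime, so the disabled case
costs a patched jump/no-op rather than the load + compare that the old
"static int sched_clock_irqtime" flag required:

        #include <linux/jump_label.h>

        /* Key starts disabled; its branch sites are patched when it flips. */
        DEFINE_STATIC_KEY_FALSE(example_key);

        static inline bool example_enabled(void)
        {
                /* Patched branch, no memory load of a flag variable. */
                return static_branch_likely(&example_key);
        }

        void example_enable(void)
        {
                /* Rewrites all branch sites; slow path, may sleep. */
                static_branch_enable(&example_key);
        }

        void example_hot_path(void)
        {
                if (!example_enabled())
                        return;
                /* ... per-call accounting work guarded by the key ... */
        }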




