This patch makes SOFTIRQ time accounted by "irqtime.total_soft" in use by adding soft_delay accounts for Delay accounting. Signed-off-by: Tio Zhang <tiozhang@xxxxxxxxxxxxxx> --- include/linux/delayacct.h | 11 +++++++---- kernel/delayacct.c | 5 +++-- kernel/sched/core.c | 6 ++++-- kernel/sched/sched.h | 1 + 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6639f48dac36..bf1d45fcb505 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -49,12 +49,14 @@ struct task_delay_info { u64 wpcopy_delay; /* wait for write-protect copy */ u64 irq_delay; /* wait for IRQ/SOFTIRQ */ + u64 soft_delay; /* wait for SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ u32 thrashing_count; /* total count of thrash waits */ u32 compact_count; /* total count of memory compact */ u32 wpcopy_count; /* total count of write-protect copy */ u32 irq_count; /* total count of IRQ/SOFTIRQ */ + u32 soft_count; /* total count of SOFTIRQ */ }; #endif @@ -84,7 +86,7 @@ extern void __delayacct_compact_start(void); extern void __delayacct_compact_end(void); extern void __delayacct_wpcopy_start(void); extern void __delayacct_wpcopy_end(void); -extern void __delayacct_irq(struct task_struct *task, u32 delta); +extern void __delayacct_irq(struct task_struct *task, u32 delta, u32 delta_soft); static inline void delayacct_tsk_init(struct task_struct *tsk) { @@ -219,13 +221,14 @@ static inline void delayacct_wpcopy_end(void) __delayacct_wpcopy_end(); } -static inline void delayacct_irq(struct task_struct *task, u32 delta) +static inline void delayacct_irq(struct task_struct *task, u32 delta, + u32 delta_soft) { if (!static_branch_unlikely(&delayacct_key)) return; if (task->delays) - __delayacct_irq(task, delta); + __delayacct_irq(task, delta, delta_soft); } #else @@ -266,7 +269,7 @@ static inline void delayacct_wpcopy_start(void) {} static inline void delayacct_wpcopy_end(void) {} -static inline void 
delayacct_irq(struct task_struct *task, u32 delta) +static inline void delayacct_irq(struct task_struct *task, u32 delta, u32 delta_soft) {} #endif /* CONFIG_TASK_DELAY_ACCT */ diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 6f0c358e73d8..8517f1c1df88 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -278,13 +278,14 @@ void __delayacct_wpcopy_end(void) &current->delays->wpcopy_count); -void __delayacct_irq(struct task_struct *task, u32 delta) +void __delayacct_irq(struct task_struct *task, u32 delta, u32 delta_soft) { unsigned long flags; raw_spin_lock_irqsave(&task->delays->lock, flags); task->delays->irq_delay += delta; task->delays->irq_count++; + task->delays->soft_delay += delta_soft; + task->delays->soft_count++; raw_spin_unlock_irqrestore(&task->delays->lock, flags); } - diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9116bcc90346..2f5fd775b47b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -698,10 +698,11 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) * In theory, the compile should just see 0 here, and optimize out the call * to sched_rt_avg_update. But I don't trust it... 
*/ - s64 __maybe_unused steal = 0, irq_delta = 0; + s64 __maybe_unused steal = 0, irq_delta = 0, soft_delta = 0; #ifdef CONFIG_IRQ_TIME_ACCOUNTING irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; + soft_delta = irq_time_read_soft(cpu_of(rq)) - rq->prev_soft_time; /* * Since irq_time is only updated on {soft,}irq_exit, we might run into @@ -722,9 +723,10 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) irq_delta = delta; rq->prev_irq_time += irq_delta; + rq->prev_soft_time += soft_delta; delta -= irq_delta; psi_account_irqtime(rq->curr, irq_delta); - delayacct_irq(rq->curr, irq_delta); + delayacct_irq(rq->curr, irq_delta, soft_delta); #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING if (static_key_false((&paravirt_steal_rq_enabled))) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f479c61b84b5..abf96ad9c301 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1105,6 +1105,7 @@ struct rq { #ifdef CONFIG_IRQ_TIME_ACCOUNTING u64 prev_irq_time; + u64 prev_soft_time; #endif #ifdef CONFIG_PARAVIRT u64 prev_steal_time; -- 2.17.1