From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

Reduce the softirq timeout when it is preempting an RT task.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Liu Jian <liujian56@xxxxxxxxxx>
---
 kernel/softirq.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index e2cad5d108c8..baa08ae1604f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -522,12 +522,12 @@ static inline void lockdep_softirq_end(bool in_hardirq) { }
 #define MAX_SOFTIRQ_TIME (2 * NSEC_PER_MSEC)
 #define MAX_SOFTIRQ_RESTART 10
 
-static inline bool __softirq_needs_break(u64 start)
+static inline bool __softirq_needs_break(u64 start, u64 timo)
 {
 	if (need_resched())
 		return true;
 
-	if (sched_clock() - start >= MAX_SOFTIRQ_TIME)
+	if (sched_clock() - start >= timo)
 		return true;
 
 	return false;
@@ -537,6 +537,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned int max_restart = MAX_SOFTIRQ_RESTART;
 	unsigned long old_flags = current->flags;
+	u64 timo = MAX_SOFTIRQ_TIME;
 	u64 start = sched_clock();
 	struct softirq_action *h;
 	unsigned long pending;
@@ -556,6 +557,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	in_hardirq = lockdep_softirq_start();
 	account_softirq_enter(current);
 
+	if (__this_cpu_read(ksoftirqd) != current && task_is_realtime(current))
+		timo >>= 2;
+
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
@@ -583,7 +587,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 			preempt_count_set(prev_count);
 		}
 
-		if (pending && __softirq_needs_break(start))
+		if (pending && __softirq_needs_break(start, timo))
 			break;
 	}
 
@@ -596,7 +600,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	if (pending)
 		or_softirq_pending(pending);
 	else if ((pending = local_softirq_pending()) &&
-		 !__softirq_needs_break(start) &&
+		 !__softirq_needs_break(start, timo) &&
 		 --max_restart)
 		goto restart;

-- 
2.34.1