On 3 Oct 2022 23:20:32 +0000 John Stultz <jstultz@xxxxxxxxxx> wrote:
> +#ifdef CONFIG_RT_SOFTIRQ_OPTIMIZATION
> +#define __use_softirq_opt 1
> +/*
> + * Return whether the given cpu is currently non-preemptible
> + * while handling a potentially long softirq, or if the current
> + * task is likely to block preemptions soon because it is a
> + * ksoftirq thread that is handling slow softirq.
> + */
> +static bool cpu_busy_with_softirqs(int cpu)
> +{
> +	u32 softirqs = per_cpu(active_softirqs, cpu) |
> +		       __cpu_softirq_pending(cpu);
> +	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
> +	struct task_struct *curr;
> +	struct rq *rq = cpu_rq(cpu);
> +	int ret;
> +
> +	rcu_read_lock();
> +	curr = READ_ONCE(rq->curr); /* unlocked access */
> +	ret = (softirqs & LONG_SOFTIRQ_MASK) &&
> +	      (curr == cpu_ksoftirqd ||
> +	       preempt_count() & SOFTIRQ_MASK);
> +	rcu_read_unlock();
> +	return ret;
> +}
> +#else
> +#define __use_softirq_opt 0
> +static bool cpu_busy_with_softirqs(int cpu)
> +{
> +	return false;
> +}
> +#endif /* CONFIG_RT_SOFTIRQ_OPTIMIZATION */
> +
> +static bool rt_task_fits_cpu(struct task_struct *p, int cpu)
> +{
> +	return !cpu_busy_with_softirqs(cpu) && rt_task_fits_capacity(p, cpu);
> +}

On one hand, an RT task is not latency sensitive enough if it fails to preempt ksoftirqd. On the other hand, deferring softirqs to ksoftirqd in 3/3 barely makes sense if it ends up preempting the current RT task.
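
To make the first point concrete, here is a rough, hypothetical sketch (not part of this series; rt_task_beats_cpu_curr() and its use of rt_task()/cpu_rq() are my own illustration): ksoftirqd is a SCHED_NORMAL kthread, so a SCHED_FIFO/SCHED_RR task enqueued on that CPU preempts it as soon as it is woken, which makes the "curr == cpu_ksoftirqd" half of the condition look like it only helps tasks that could not preempt ksoftirqd anyway.

/*
 * Hypothetical illustration only, not from the posted patch: an RT task
 * placed on @cpu wins against ksoftirqd immediately because ksoftirqd
 * runs at SCHED_NORMAL, so skipping a CPU merely because ksoftirqd is
 * current there only matters for tasks that are RT in name rather than
 * in urgency.  Locking/RCU around the rq->curr access is elided for
 * brevity in this sketch.
 */
static bool rt_task_beats_cpu_curr(struct task_struct *p, int cpu)
{
	struct task_struct *curr = READ_ONCE(cpu_rq(cpu)->curr);

	/* any FIFO/RR task wins against a CFS thread such as ksoftirqd */
	return rt_task(p) && !rt_task(curr);
}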