On 2021-03-09 09:42:10 [+0100], Thomas Gleixner wrote:
> tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_SCHED bit in
> the tasklet state to be cleared. This works on !RT nicely because the
…

Could you please fold this:

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 07c7329d21aa7..1c14ccd351091 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -663,15 +663,6 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 void tasklet_unlock(struct tasklet_struct *t);
 void tasklet_unlock_wait(struct tasklet_struct *t);
 
-/*
- * Do not use in new code. Waiting for tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f0074f1344402..c9adc5c462485 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -830,8 +830,8 @@ EXPORT_SYMBOL(tasklet_init);
 
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /*
- * Do not use in new code. There is no real reason to invoke this from
- * atomic contexts.
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
  */
 void tasklet_unlock_spin_wait(struct tasklet_struct *t)
 {
-- 
2.30.1
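
Context note, not part of the fold: with the inline copy removed, callers end up
in the out-of-line variant in kernel/softirq.c. A rough sketch of its RT-aware
shape, as I remember it from this series (exact body and comments may differ):

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &t->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * On RT softirq processing is preemptible, so a
			 * plain cpu_relax() loop can live lock against a
			 * preempted softirq. The BH disable/enable pair
			 * waits for the softirq to complete instead.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
#endif

That RT handling is why keeping a second, plain cpu_relax() copy in the header
would be misleading.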