napi_schedule() is expected to be called either: * From an interrupt, where raised softirqs are handled on IRQ exit * From a softirq disabled section, where raised softirqs are handled on the next call to local_bh_enable(). * From a softirq handler, where raised softirqs are handled on the next round in do_softirq(), or further deferred to a dedicated kthread. Other bare task contexts may end up ignoring the raised NET_RX vector until the next random softirq handling opportunity, which may not happen for a while if the CPU goes idle afterwards with the tick stopped. Report inappropriate calling contexts when none of the three above conditions is met. Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx> --- include/linux/lockdep.h | 12 ++++++++++++ net/core/dev.c | 1 + 2 files changed, 13 insertions(+) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 67964dc4db95..1bd730b881f0 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -619,6 +619,17 @@ do { \ (!in_softirq() || in_irq() || in_nmi())); \ } while (0) +/* + * Assert that we are either in hardirq context, serving a softirq, or + * in a softirq-disabled section. Verifies a safe context to queue a + * softirq with __raise_softirq_irqoff(). 
+ */ +#define lockdep_assert_in_interrupt() \ +do { \ + WARN_ON_ONCE(__lockdep_enabled && !in_interrupt()); \ +} while (0) + + extern void lockdep_assert_in_softirq_func(void); #else @@ -634,6 +645,7 @@ extern void lockdep_assert_in_softirq_func(void); # define lockdep_assert_preemption_enabled() do { } while (0) # define lockdep_assert_preemption_disabled() do { } while (0) # define lockdep_assert_in_softirq() do { } while (0) +# define lockdep_assert_in_interrupt() do { } while (0) # define lockdep_assert_in_softirq_func() do { } while (0) #endif diff --git a/net/core/dev.c b/net/core/dev.c index c0021cbd28fc..80e415ccf2c8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4666,6 +4666,7 @@ static inline void ____napi_schedule(struct softnet_data *sd, struct task_struct *thread; lockdep_assert_irqs_disabled(); + lockdep_assert_in_interrupt(); if (test_bit(NAPI_STATE_THREADED, &napi->state)) { /* Paired with smp_mb__before_atomic() in -- 2.46.0