On 2021-11-19 15:41:25 [+0100], Mike Galbraith wrote: > --- a/net/core/netpoll.c > +++ b/net/core/netpoll.c > @@ -252,6 +252,7 @@ static void zap_completion_queue(void) > clist = sd->completion_queue; > sd->completion_queue = NULL; > local_irq_restore(flags); > + put_cpu_var(softnet_data); > > while (clist != NULL) { > struct sk_buff *skb = clist; > @@ -263,9 +264,8 @@ static void zap_completion_queue(void) > __kfree_skb(skb); > } > } > - } > - > - put_cpu_var(softnet_data); > + } else > + put_cpu_var(softnet_data); > } Looking at the callers of zap_completion_queue() it seems that get_cpu_var() could be replaced with this_cpu_ptr() since the pointer is stable at this point. > static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) > @@ -365,16 +366,22 @@ static netdev_tx_t __netpoll_send_skb(st > > netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) > { > - unsigned long flags; > + unsigned long __maybe_unused flags; > netdev_tx_t ret; > > if (unlikely(!np)) { > dev_kfree_skb_irq(skb); > ret = NET_XMIT_DROP; > } else { > - local_irq_save(flags); > + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) > + local_irq_save(flags); > + else > + rcu_read_lock_bh(); > ret = __netpoll_send_skb(np, skb); > - local_irq_restore(flags); > + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) > + local_irq_restore(flags); > + else > + rcu_read_unlock_bh(); > } > return ret; > } What is the context for netpoll_send_skb()? Why do we need to disable BH and take the RCU read lock on RT? If interrupts are never disabled, doesn't this break the assumption made in netpoll_tx_running()? queue_process() is also busted. Sebastian