[PATCH] [NETPOLL] Avoid recursive NAPI poll calls on UP

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Add a flag to prevent simultaneous, recursive calls to a driver's
NAPI poll function by net_rx_action() and poll_napi().  This is
already prevented by locks on SMP, but the flag is needed for UP.

Signed-off-by: Dale Farnsworth <dale@xxxxxxxxxxxxxx>

---
I saw the recursion using kgdboe on an mpc8548cds system.

There was one-way protection via npinfo->poll_owner, but nothing
preventing net_rx_action() from calling the poll function when
it had already been invoked by poll_napi().

Perhaps the rx_flags and flags fields could be merged into a
single field, but I was unsure of the locking.

 include/linux/netpoll.h |   20 +++++++++++++++++---
 net/core/dev.c          |    6 ++++++
 net/core/netpoll.c      |   10 ++++++----
 3 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ca5a873..467e52d 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -28,11 +28,15 @@ struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
 	int tries;
+	int flags;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 };
 
+/* netpoll_info.flags */
+#define NETPOLL_POLL_ACTIVE	1
+
 void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 int netpoll_parse_options(struct netpoll *np, char *opt);
@@ -44,6 +48,9 @@ int __netpoll_rx(struct sk_buff *skb);
 void netpoll_queue(struct sk_buff *skb);
 
 #ifdef CONFIG_NETPOLL
+
+static struct netpoll_info dummy_netpoll_info;
+
 static inline int netpoll_rx(struct sk_buff *skb)
 {
 	struct netpoll_info *npinfo = skb->dev->npinfo;
@@ -67,17 +74,24 @@ static inline void *netpoll_poll_lock(st
 	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
+		if (dev->npinfo->flags & NETPOLL_POLL_ACTIVE) {
+			spin_unlock(&dev->npinfo->poll_lock);
+			rcu_read_unlock();
+			return NULL;
+		}
+		dev->npinfo->flags |= NETPOLL_POLL_ACTIVE;
 		dev->npinfo->poll_owner = smp_processor_id();
 		return dev->npinfo;
 	}
-	return NULL;
+	return &dummy_netpoll_info;
 }
 
 static inline void netpoll_poll_unlock(void *have)
 {
 	struct netpoll_info *npi = have;
 
-	if (npi) {
+	if (npi != &dummy_netpoll_info) {
+		npi->flags &= ~NETPOLL_POLL_ACTIVE;
 		npi->poll_owner = -1;
 		spin_unlock(&npi->poll_lock);
 	}
@@ -86,7 +100,7 @@ static inline void netpoll_poll_unlock(v
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a) NULL
+#define netpoll_poll_lock(a) (&dummy_netpoll_info)
 #define netpoll_poll_unlock(a)
 #endif
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 434220d..ee6520f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1829,6 +1829,12 @@ static void net_rx_action(struct softirq
 				 struct net_device, poll_list);
 		have = netpoll_poll_lock(dev);
 
+		if (!have) {
+			/* dev->poll() already running, retry later */
+			__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+			return;
+		}
+
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
 			netpoll_poll_unlock(have);
 			local_irq_disable();
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05ce..425e4a0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -126,8 +126,8 @@ static int checksum_udp(struct sk_buff *
  *
  * Note: we don't mask interrupts with this lock because we're using
  * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
+ * case. Further, we test the NETPOLL_POLL_ACTIVE flag to avoid recursion
+ * on UP systems where the lock doesn't exist.
  *
  * In cases where there is bi-directional communications, reading only
  * one message at a time can lead to packets being dropped by the
@@ -140,8 +140,9 @@ static void poll_napi(struct netpoll *np
 	int budget = 16;
 
 	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
-	    npinfo->poll_owner != smp_processor_id() &&
-	    spin_trylock(&npinfo->poll_lock)) {
+	    spin_trylock(&npinfo->poll_lock) &&
+	    (npinfo->flags & NETPOLL_POLL_ACTIVE) == 0) {
+		npinfo->flags |= NETPOLL_POLL_ACTIVE;
 		npinfo->rx_flags |= NETPOLL_RX_DROP;
 		atomic_inc(&trapped);
 
@@ -149,6 +150,7 @@ static void poll_napi(struct netpoll *np
 
 		atomic_dec(&trapped);
 		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+		npinfo->flags &= ~NETPOLL_POLL_ACTIVE;
 		spin_unlock(&npinfo->poll_lock);
 	}
 }
-
To unsubscribe from this list: send the line "unsubscribe linux-net" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Netdev]     [Ethernet Bridging]     [Linux 802.1Q VLAN]     [Linux Wireless]     [Kernel Newbies]     [Security]     [Linux for Hams]     [Netfilter]     [Git]     [Bugtraq]     [Yosemite News and Information]     [MIPS Linux]     [ARM Linux]     [Linux RAID]     [Linux PCI]     [Linux Admin]     [Samba]

  Powered by Linux