Make several functions global to be able to use GRO without a NAPI
instance. This includes the init, cleanup and receive functions, as
well as a couple of inlines to start and stop the deferred flush timer.
Taking into account the already global gro_flush(), it is now fully
possible to maintain a GRO node without an auxiliary NAPI entity.

Signed-off-by: Alexander Lobakin <alexandr.lobakin@xxxxxxxxx>
---
 include/net/gro.h | 18 +++++++++++++++
 net/core/dev.c    | 45 ++++++-------------------------------
 net/core/gro.c    | 57 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 82 insertions(+), 38 deletions(-)

diff --git a/include/net/gro.h b/include/net/gro.h
index 75211ebd8765..539f931e736f 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -421,6 +421,7 @@ static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
 }
 
 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+void gro_receive_skb_list(struct gro_node *gro, struct list_head *list);
 void __gro_flush(struct gro_node *gro, bool flush_old);
 
 static inline void gro_flush(struct gro_node *gro, bool flush_old)
@@ -458,5 +459,22 @@ static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb,
 		gro_normal_list(gro);
 }
 
+static inline void gro_timer_start(struct gro_node *gro, u64 timeout_ns)
+{
+	if (!timeout_ns)
+		return;
+
+	hrtimer_start(&gro->timer, ns_to_ktime(timeout_ns),
+		      HRTIMER_MODE_REL_PINNED);
+}
+
+static inline void gro_timer_cancel(struct gro_node *gro)
+{
+	hrtimer_cancel(&gro->timer);
+}
+
+void gro_init(struct gro_node *gro,
+	      enum hrtimer_restart (*timer_cb)(struct hrtimer *timer));
+void gro_cleanup(struct gro_node *gro);
 
 #endif /* _NET_IPV6_GRO_H */
diff --git a/net/core/dev.c b/net/core/dev.c
index 8b334aa974c2..62bf6ee00741 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5812,9 +5812,8 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
 			return false;
 	}
 
-	if (timeout)
-		hrtimer_start(&n->gro.timer, ns_to_ktime(timeout),
-			      HRTIMER_MODE_REL_PINNED);
+	gro_timer_start(&n->gro, timeout);
+
 	return ret;
 }
 EXPORT_SYMBOL(napi_complete_done);
@@ -5876,7 +5875,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool
 		napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
 		timeout = READ_ONCE(napi->dev->gro_flush_timeout);
 		if (napi->defer_hard_irqs_count && timeout) {
-			hrtimer_start(&napi->gro.timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
+			gro_timer_start(&napi->gro, timeout);
 			skip_schedule = true;
 		}
 	}
@@ -6025,17 +6024,6 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void init_gro_hash(struct napi_struct *napi)
-{
-	int i;
-
-	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		INIT_LIST_HEAD(&napi->gro.hash[i].list);
-		napi->gro.hash[i].count = 0;
-	}
-	napi->gro.bitmask = 0;
-}
-
 int dev_set_threaded(struct net_device *dev, bool threaded)
 {
 	struct napi_struct *napi;
@@ -6105,12 +6093,8 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 	INIT_LIST_HEAD(&napi->poll_list);
 	INIT_HLIST_NODE(&napi->napi_hash_node);
-	hrtimer_init(&napi->gro.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-	napi->gro.timer.function = napi_watchdog;
-	init_gro_hash(napi);
+	gro_init(&napi->gro, napi_watchdog);
 	napi->skb = NULL;
-	INIT_LIST_HEAD(&napi->gro.rx_list);
-	napi->gro.rx_count = 0;
 	napi->poll = poll;
 	if (weight > NAPI_POLL_WEIGHT)
 		netdev_err_once(dev, "%s() called with weight %d\n", __func__,
@@ -6155,8 +6139,7 @@ void napi_disable(struct napi_struct *n)
 			break;
 	}
 
-	hrtimer_cancel(&n->gro.timer);
-
+	gro_timer_cancel(&n->gro);
 	clear_bit(NAPI_STATE_DISABLE, &n->state);
 }
 EXPORT_SYMBOL(napi_disable);
@@ -6183,19 +6166,6 @@ void napi_enable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_enable);
 
-static void flush_gro_hash(struct napi_struct *napi)
-{
-	int i;
-
-	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
-		struct sk_buff *skb, *n;
-
-		list_for_each_entry_safe(skb, n, &napi->gro.hash[i].list, list)
-			kfree_skb(skb);
-		napi->gro.hash[i].count = 0;
-	}
-}
-
 /* Must be called in process context */
 void __netif_napi_del(struct napi_struct *napi)
 {
@@ -6206,8 +6176,7 @@ void __netif_napi_del(struct napi_struct *napi)
 	list_del_rcu(&napi->dev_list);
 	napi_free_frags(napi);
 
-	flush_gro_hash(napi);
-	napi->gro.bitmask = 0;
+	gro_cleanup(&napi->gro);
 
 	if (napi->thread) {
 		kthread_stop(napi->thread);
@@ -10627,7 +10596,7 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
 		spin_lock_init(&sd->defer_lock);
 
-		init_gro_hash(&sd->backlog);
+		gro_init(&sd->backlog.gro, NULL);
 		sd->backlog.poll = process_backlog;
 		sd->backlog.weight = weight_p;
 	}
diff --git a/net/core/gro.c b/net/core/gro.c
index 67fd587a87c9..424c812abe79 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -624,6 +624,18 @@ static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb,
 	return ret;
 }
 
+void gro_receive_skb_list(struct gro_node *gro, struct list_head *list)
+{
+	struct sk_buff *skb, *tmp;
+
+	list_for_each_entry_safe(skb, tmp, list, list) {
+		skb_list_del_init(skb);
+
+		skb_gro_reset_offset(skb, 0);
+		gro_skb_finish(gro, skb, dev_gro_receive(gro, skb));
+	}
+}
+
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
 	struct gro_node *gro = &napi->gro;
@@ -792,3 +804,48 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
 	return sum;
 }
 EXPORT_SYMBOL(__skb_gro_checksum_complete);
+
+void gro_init(struct gro_node *gro,
+	      enum hrtimer_restart (*timer_cb)(struct hrtimer *))
+{
+	u32 i;
+
+	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+		INIT_LIST_HEAD(&gro->hash[i].list);
+		gro->hash[i].count = 0;
+	}
+
+	gro->bitmask = 0;
+
+	INIT_LIST_HEAD(&gro->rx_list);
+	gro->rx_count = 0;
+
+	if (!timer_cb)
+		return;
+
+	hrtimer_init(&gro->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+	gro->timer.function = timer_cb;
+}
+
+void gro_cleanup(struct gro_node *gro)
+{
+	struct sk_buff *skb, *n;
+	u32 i;
+
+	gro_timer_cancel(gro);
+	memset(&gro->timer, 0, sizeof(gro->timer));
+
+	for (i = 0; i < GRO_HASH_BUCKETS; i++) {
+		list_for_each_entry_safe(skb, n, &gro->hash[i].list, list)
+			kfree_skb(skb);
+
+		gro->hash[i].count = 0;
+	}
+
+	gro->bitmask = 0;
+
+	list_for_each_entry_safe(skb, n, &gro->rx_list, list)
+		kfree_skb(skb);
+
+	gro->rx_count = 0;
+}
-- 
2.36.1
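
For reference, a minimal sketch of how a non-NAPI user might drive a
standalone struct gro_node with the helpers made available here. The
context structure and the my_rx_*() functions are made-up names for
illustration only and are not part of this patch; only gro_init(),
gro_receive_skb_list(), gro_flush(), gro_normal_list() and
gro_cleanup() are real GRO helpers:

```c
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/gro.h>

/* Hypothetical receive context that owns a standalone GRO node. */
struct my_rx_ctx {
	struct gro_node		gro;
};

static void my_rx_ctx_init(struct my_rx_ctx *ctx)
{
	/*
	 * No deferred-flush hrtimer needed: pass a NULL callback, as the
	 * backlog conversion in net_dev_init() does in this patch.
	 */
	gro_init(&ctx->gro, NULL);
}

/* Called with a list of freshly received skbs, e.g. once per poll cycle. */
static void my_rx_poll_done(struct my_rx_ctx *ctx, struct list_head *rcvd)
{
	/* Run every skb on the list through the GRO engine. */
	gro_receive_skb_list(&ctx->gro, rcvd);

	/*
	 * Flush all held packets (flush_old == false flushes regardless
	 * of age) and push the resulting rx_list up the stack.
	 */
	gro_flush(&ctx->gro, false);
	gro_normal_list(&ctx->gro);
}

static void my_rx_ctx_free(struct my_rx_ctx *ctx)
{
	/* Frees any skbs still held in the hash buckets or on rx_list. */
	gro_cleanup(&ctx->gro);
}
```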