The users of netdev_alloc_cache and napi_frag_cache don't need to take
the bh lock around access to these fragment caches any more as the
percpu handling is now done in page_frag_alloc_align().

Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
cc: Eric Dumazet <edumazet@xxxxxxxxxx>
cc: Jakub Kicinski <kuba@xxxxxxxxxx>
cc: Paolo Abeni <pabeni@xxxxxxxxxx>
cc: linux-mm@xxxxxxxxx
---
 include/linux/skbuff.h |  3 ++-
 net/core/skbuff.c      | 29 +++++++++--------------------
 2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 41b63e72c6c3..e11a765fe7fa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -252,7 +252,8 @@
 /* Maximum value in skb->csum_level */
 #define SKB_MAX_CSUM_LEVEL	3
 
-#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
+#define SKB_DATA_ALIGNMENT	SMP_CACHE_BYTES
+#define SKB_DATA_ALIGN(X)	ALIGN(X, SKB_DATA_ALIGNMENT)
 #define SKB_WITH_OVERHEAD(X)	\
 	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 225a16f3713f..c2840b0dcad9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -291,27 +291,20 @@ void napi_get_frags_check(struct napi_struct *napi)
 
 void *napi_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
-	fragsz = SKB_DATA_ALIGN(fragsz);
-
+	align = min_t(unsigned int, align, SKB_DATA_ALIGNMENT);
 	return page_frag_alloc_align(&napi_frag_cache, fragsz, GFP_ATOMIC, align);
 }
 EXPORT_SYMBOL(napi_alloc_frag_align);
 
 void *netdev_alloc_frag_align(unsigned int fragsz, unsigned int align)
 {
-	void *data;
-
-	fragsz = SKB_DATA_ALIGN(fragsz);
-	if (in_hardirq() || irqs_disabled()) {
-		data = page_frag_alloc_align(&netdev_alloc_cache,
+	align = min_t(unsigned int, align, SKB_DATA_ALIGNMENT);
+	if (in_hardirq() || irqs_disabled())
+		return page_frag_alloc_align(&netdev_alloc_cache,
 					     fragsz, GFP_ATOMIC, align);
-	} else {
-		local_bh_disable();
-		data = page_frag_alloc_align(&napi_frag_cache,
+	else
+		return page_frag_alloc_align(&napi_frag_cache,
 					     fragsz, GFP_ATOMIC, align);
-		local_bh_enable();
-	}
-	return data;
 }
 EXPORT_SYMBOL(netdev_alloc_frag_align);
 
@@ -709,15 +702,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	if (in_hardirq() || irqs_disabled()) {
+	if (in_hardirq() || irqs_disabled())
 		data = page_frag_alloc(&netdev_alloc_cache, len, gfp_mask);
-		pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
-	} else {
-		local_bh_disable();
+	else
 		data = page_frag_alloc(&napi_frag_cache, len, gfp_mask);
-		pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
-		local_bh_enable();
-	}
+	pfmemalloc = folio_is_pfmemalloc(virt_to_folio(data));
 
 	if (unlikely(!data))
 		return NULL;
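
A note on the claim above that "the percpu handling is now done in
page_frag_alloc_align()": the sketch below shows, in general terms, how a
fragment allocator can own the BH exclusion for its percpu cache so that
callers, like the simplified napi_alloc_frag_align() and
netdev_alloc_frag_align() in the hunks above, no longer bracket the call
with local_bh_disable()/local_bh_enable() themselves.  This is not the
implementation from the earlier patch in this series; all names here
(example_frag_cache, example_frag_alloc) are made up for illustration,
and hard-IRQ callers are ignored, which is why __netdev_alloc_skb() above
still selects netdev_alloc_cache separately for that case.

#include <linux/bottom_half.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>

/* Hypothetical percpu fragment cache: one partially-used page per CPU. */
struct example_frag_cache {
	struct page	*page;
	unsigned int	offset;
};

static DEFINE_PER_CPU(struct example_frag_cache, example_frag_cache);

/* Carve an aligned fragment out of this CPU's cached page, refilling the
 * cache when the page is exhausted.  The BH exclusion is done here,
 * inside the allocator, rather than by every caller.
 */
static void *example_frag_alloc(unsigned int fragsz, gfp_t gfp,
				unsigned int align)
{
	struct example_frag_cache *c;
	void *va = NULL;

	local_bh_disable();	/* serialise against softirq users of the cache */
	c = this_cpu_ptr(&example_frag_cache);

	c->offset = ALIGN(c->offset, align);
	if (!c->page || c->offset + fragsz > PAGE_SIZE) {
		if (c->page)
			put_page(c->page);	/* drop the cache's reference */
		c->page = alloc_page(gfp);
		c->offset = 0;
	}

	if (c->page && c->offset + fragsz <= PAGE_SIZE) {
		va = page_address(c->page) + c->offset;
		get_page(c->page);		/* each fragment pins the page */
		c->offset += fragsz;
	}

	local_bh_enable();
	return va;
}

A process- or softirq-context caller would then just do
"p = example_frag_alloc(size, GFP_ATOMIC, SMP_CACHE_BYTES);" with no
locking of its own, which mirrors what the call sites above now rely on
page_frag_alloc_align() to provide.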