Use the page_pool[_cache]_alloc() API to allocate memory, keeping both
memory utilization and the performance penalty to a minimum.

Signed-off-by: Yunsheng Lin <linyunsheng@xxxxxxxxxx>
CC: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
CC: Alexander Duyck <alexander.duyck@xxxxxxxxx>
CC: Liang Chen <liangchen.linux@xxxxxxxxx>
CC: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
---
 drivers/net/veth.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 953f6d8f8db0..f9bb79d89a84 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -736,10 +736,11 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
 	    skb_shinfo(skb)->nr_frags ||
 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-		u32 size, len, max_head_size, off;
+		u32 size, len, max_head_size, off, truesize, page_offset;
 		struct sk_buff *nskb;
 		struct page *page;
 		int i, head_off;
+		void *data;
 
 		/* We need a private copy of the skb and data buffers since
 		 * the ebpf program can modify it. We segment the original skb
@@ -752,14 +753,17 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 		if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
 			goto drop;
 
+		size = min_t(u32, skb->len, max_head_size);
+		truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
+
 		/* Allocate skb head */
-		page = page_pool_dev_alloc_pages(rq->page_pool);
-		if (!page)
+		data = page_pool_dev_cache_alloc(rq->page_pool, &truesize);
+		if (!data)
 			goto drop;
 
-		nskb = napi_build_skb(page_address(page), PAGE_SIZE);
+		nskb = napi_build_skb(data, truesize);
 		if (!nskb) {
-			page_pool_put_full_page(rq->page_pool, page, true);
+			page_pool_cache_free(rq->page_pool, data, true);
 			goto drop;
 		}
 
@@ -767,7 +771,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 		skb_copy_header(nskb, skb);
 		skb_mark_for_recycle(nskb);
 
-		size = min_t(u32, skb->len, max_head_size);
 		if (skb_copy_bits(skb, 0, nskb->data, size)) {
 			consume_skb(nskb);
 			goto drop;
@@ -782,14 +785,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 		len = skb->len - off;
 
 		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
-			page = page_pool_dev_alloc_pages(rq->page_pool);
+			size = min_t(u32, len, PAGE_SIZE);
+			truesize = size;
+
+			page = page_pool_dev_alloc(rq->page_pool, &page_offset,
+						   &truesize);
 			if (!page) {
 				consume_skb(nskb);
 				goto drop;
 			}
 
-			size = min_t(u32, len, PAGE_SIZE);
-			skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
+			skb_add_rx_frag(nskb, i, page, page_offset, size,
+					truesize);
 
 			if (skb_copy_bits(skb, off, page_address(page), size)) {
 				consume_skb(nskb);
-- 
2.33.0
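
For reviewers unfamiliar with the new helpers, the allocation pattern
this patch adopts looks roughly like the sketch below. It is an
illustration only, not part of the patch, and it assumes the
page_pool_dev_cache_alloc(), page_pool_cache_free() and
page_pool_dev_alloc() signatures introduced earlier in this series:

	struct page *page;
	u32 truesize, page_offset;
	void *data;

	/* Head: request only what is needed; the pool rounds 'truesize'
	 * up to what it actually reserves, and may return a fragment of
	 * a page rather than a whole page.
	 */
	truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
	data = page_pool_dev_cache_alloc(rq->page_pool, &truesize);
	if (!data)
		goto drop;

	/* 'data' is a virtual address, so it feeds napi_build_skb()
	 * directly; on failure it is returned with the matching cache
	 * helper instead of page_pool_put_full_page().
	 */
	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_cache_free(rq->page_pool, data, true);
		goto drop;
	}

	/* Frags: page_pool_dev_alloc() hands back the page plus the
	 * offset of the reserved fragment within it, so
	 * skb_add_rx_frag() takes 'page_offset' and 'truesize' instead
	 * of the old hard-coded 0 and PAGE_SIZE.
	 */
	size = min_t(u32, len, PAGE_SIZE);
	truesize = size;
	page = page_pool_dev_alloc(rq->page_pool, &page_offset, &truesize);
	if (page)
		skb_add_rx_frag(nskb, i, page, page_offset, size, truesize);

The net effect is that sub-page requests no longer pin a full page each,
which is where the memory-utilization saving in the commit message comes
from.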