Rely on the skb_cow_data_for_xdp utility routine and remove the duplicated
code from veth_convert_skb_to_xdp_buff().

Acked-by: Jesper Dangaard Brouer <hawk@xxxxxxxxxx>
Reviewed-by: Toke Hoiland-Jorgensen <toke@xxxxxxxxxx>
Signed-off-by: Lorenzo Bianconi <lorenzo@xxxxxxxxxx>
---
 drivers/net/veth.c | 79 +++--------------------------------------------
 1 file changed, 5 insertions(+), 74 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 578e36ea1589..a7a541c1a374 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -721,7 +721,8 @@ static void veth_xdp_get(struct xdp_buff *xdp)
 
 static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 					struct xdp_buff *xdp,
-					struct sk_buff **pskb)
+					struct sk_buff **pskb,
+					struct bpf_prog *prog)
 {
 	struct sk_buff *skb = *pskb;
 	u32 frame_sz;
@@ -729,80 +730,10 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
 	    skb_shinfo(skb)->nr_frags ||
 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-		u32 size, len, max_head_size, off, truesize, page_offset;
-		struct sk_buff *nskb;
-		struct page *page;
-		int i, head_off;
-		void *va;
-
-		/* We need a private copy of the skb and data buffers since
-		 * the ebpf program can modify it. We segment the original skb
-		 * into order-0 pages without linearize it.
-		 *
-		 * Make sure we have enough space for linear and paged area
-		 */
-		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
-						  VETH_XDP_HEADROOM);
-		if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
-			goto drop;
-
-		size = min_t(u32, skb->len, max_head_size);
-		truesize = SKB_HEAD_ALIGN(size) + VETH_XDP_HEADROOM;
-
-		/* Allocate skb head */
-		va = page_pool_dev_alloc_va(rq->page_pool, &truesize);
-		if (!va)
-			goto drop;
-
-		nskb = napi_build_skb(va, truesize);
-		if (!nskb) {
-			page_pool_free_va(rq->page_pool, va, true);
+		if (skb_cow_data_for_xdp(rq->page_pool, pskb, prog))
 			goto drop;
-		}
 
-		skb_reserve(nskb, VETH_XDP_HEADROOM);
-		skb_copy_header(nskb, skb);
-		skb_mark_for_recycle(nskb);
-
-		if (skb_copy_bits(skb, 0, nskb->data, size)) {
-			consume_skb(nskb);
-			goto drop;
-		}
-		skb_put(nskb, size);
-		head_off = skb_headroom(nskb) - skb_headroom(skb);
-		skb_headers_offset_update(nskb, head_off);
-
-		/* Allocate paged area of new skb */
-		off = size;
-		len = skb->len - off;
-
-		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
-			size = min_t(u32, len, PAGE_SIZE);
-			truesize = size;
-
-			page = page_pool_dev_alloc(rq->page_pool, &page_offset,
-						   &truesize);
-			if (!page) {
-				consume_skb(nskb);
-				goto drop;
-			}
-
-			skb_add_rx_frag(nskb, i, page, page_offset, size,
-					truesize);
-			if (skb_copy_bits(skb, off,
-					  page_address(page) + page_offset,
-					  size)) {
-				consume_skb(nskb);
-				goto drop;
-			}
-
-			len -= size;
-			off += size;
-		}
-
-		consume_skb(skb);
-		skb = nskb;
+		skb = *pskb;
 	}
 
 	/* SKB "head" area always have tailroom for skb_shared_info */
@@ -850,7 +781,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 	}
 
 	__skb_push(skb, skb->data - skb_mac_header(skb));
-	if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
+	if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb, xdp_prog))
 		goto drop;
 
 	vxbuf.skb = skb;
-- 
2.43.0
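
Note for reviewers without the rest of the series at hand: the conversion above
defers the private-copy work to skb_cow_data_for_xdp(). A rough sketch of the
shape such a helper is expected to have is below. This is an illustration only,
not the authoritative implementation: the skb_pp_cow_data() call, the
xdp_has_frags check, and everything beyond the signature visible in the diff
are assumptions.

	/* Sketch only (assumed internals): rebuild the skb head and frags out
	 * of page_pool memory so the XDP program can safely write to the
	 * buffer. Since the private copy may end up fragmented across order-0
	 * pages, bail out unless the attached program declares multi-buffer
	 * support.
	 */
	int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
				 struct bpf_prog *prog)
	{
		if (!prog->aux->xdp_has_frags)
			return -EINVAL;

		return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
	}

On success *pskb points at the private copy, which is why the veth caller
reloads skb from *pskb before building the xdp_buff; on failure the packet is
dropped, matching the previous error handling.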