On 5/22/20 10:11 AM, Lorenzo Bianconi wrote:
> @@ -259,28 +270,64 @@ static int cpu_map_kthread_run(void *data)
> * kthread CPU pinned. Lockless access to ptr_ring
> * consume side valid as no-resize allowed of queue.
> */
> - n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
> + n = ptr_ring_consume_batched(rcpu->queue, xdp_frames,
> + CPUMAP_BATCH);
>
> + rcu_read_lock();
> +
> + prog = READ_ONCE(rcpu->prog);
> for (i = 0; i < n; i++) {
> - void *f = frames[i];
> + void *f = xdp_frames[i];
> struct page *page = virt_to_page(f);
> + struct xdp_frame *xdpf;
> + struct xdp_buff xdp;
> + u32 act;
>
> /* Bring struct page memory area to curr CPU. Read by
> * build_skb_around via page_is_pfmemalloc(), and when
> * freed written by page_frag_free call.
> */
> prefetchw(page);
> + if (!prog) {
> + frames[nframes++] = xdp_frames[i];
> + continue;
> + }
> +
> + xdpf = f;
> + xdp.data_hard_start = xdpf->data - xdpf->headroom;
> + xdp.data = xdpf->data;
> + xdp.data_end = xdpf->data + xdpf->len;
> + xdp.data_meta = xdpf->data - xdpf->metasize;
> + xdp.frame_sz = xdpf->frame_sz;
> + /* TODO: rxq */
> +
> + act = bpf_prog_run_xdp(prog, &xdp);

Why not run the program in cpu_map_enqueue, before converting from xdp_buff to xdp_frame?