On Tue, Mar 08, 2022 at 03:57:57PM +0100, Toke Høiland-Jørgensen wrote: > +static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, > + u32 repeat) > +{ > + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); > + int err = 0, act, ret, i, nframes = 0, batch_sz; > + struct xdp_frame **frames = xdp->frames; > + struct xdp_page_head *head; > + struct xdp_frame *frm; > + bool redirect = false; > + struct xdp_buff *ctx; > + struct page *page; > + > + batch_sz = min_t(u32, repeat, xdp->batch_size); > + > + local_bh_disable(); > + xdp_set_return_frame_no_direct(); > + > + for (i = 0; i < batch_sz; i++) { > + page = page_pool_dev_alloc_pages(xdp->pp); > + if (!page) { > + err = -ENOMEM; > + goto out; > + } > + > + head = phys_to_virt(page_to_phys(page)); > + reset_ctx(head); > + ctx = &head->ctx; > + frm = &head->frm; > + xdp->frame_cnt++; > + > + act = bpf_prog_run_xdp(prog, ctx); > + > + /* if program changed pkt bounds we need to update the xdp_frame */ > + if (unlikely(ctx_was_changed(head))) { > + ret = xdp_update_frame_from_buff(ctx, frm); > + if (ret) { > + xdp_return_buff(ctx); > + continue; > + } > + } > + > + switch (act) { > + case XDP_TX: > + /* we can't do a real XDP_TX since we're not in the > + * driver, so turn it into a REDIRECT back to the same > + * index > + */ > + ri->tgt_index = xdp->dev->ifindex; > + ri->map_id = INT_MAX; > + ri->map_type = BPF_MAP_TYPE_UNSPEC; > + fallthrough; > + case XDP_REDIRECT: > + redirect = true; > + ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog); > + if (ret) > + xdp_return_buff(ctx); > + break; > + case XDP_PASS: > + frames[nframes++] = frm; > + break; > + default: > + bpf_warn_invalid_xdp_action(NULL, prog, act); > + fallthrough; > + case XDP_DROP: > + xdp_return_buff(ctx); > + break; > + } > + } > + > +out: > + if (redirect) > + xdp_do_flush(); > + if (nframes) > + err = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); This can overwrite the -ENOMEM set on the allocation-failure path with 0: if page_pool_dev_alloc_pages() fails after some frames were already queued as XDP_PASS, the goto lands here with nframes > 0, and a successful xdp_recv_frames() replaces err with its 0 return. Consider only assigning err here when it is still 0. Others LGTM. 
> + > + xdp_clear_return_frame_no_direct(); > + local_bh_enable(); > + return err; > +} > + > +static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx, > + u32 repeat, u32 batch_size, u32 *time) > + > +{ > + struct xdp_test_data xdp = { .batch_size = batch_size }; > + struct bpf_test_timer t = { .mode = NO_MIGRATE }; > + int ret; > + > + if (!repeat) > + repeat = 1; > + > + ret = xdp_test_run_setup(&xdp, ctx); > + if (ret) > + return ret; > + > + bpf_test_timer_enter(&t); > + do { > + xdp.frame_cnt = 0; > + ret = xdp_test_run_batch(&xdp, prog, repeat - t.i); > + if (unlikely(ret < 0)) > + break; > + } while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time)); > + bpf_test_timer_leave(&t); > + > + xdp_test_run_teardown(&xdp); > + return ret; > +} > +