Alexei Starovoitov <alexei.starovoitov@xxxxxxxxx> writes:

> On Fri, Dec 15, 2023 at 9:10 AM Sebastian Andrzej Siewior
> <bigeasy@xxxxxxxxxxxxx> wrote:
>> diff --git a/net/core/dev.c b/net/core/dev.c
>> index 5a0f6da7b3ae5..5ba7509e88752 100644
>> --- a/net/core/dev.c
>> +++ b/net/core/dev.c
>> @@ -3993,6 +3993,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
>>  		*pt_prev = NULL;
>>  	}
>>
>> +	guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
>>  	qdisc_skb_cb(skb)->pkt_len = skb->len;
>>  	tcx_set_ingress(skb, true);
>>
>> @@ -4045,6 +4046,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
>>  	if (!entry)
>>  		return skb;
>>
>> +	guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
>>  	/* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
>>  	 * already set by the caller.
>>  	 */
>> @@ -5008,6 +5010,7 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
>>  		u32 act;
>>  		int err;
>>
>> +		guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
>>  		act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
>>  		if (act != XDP_PASS) {
>>  			switch (act) {
>> diff --git a/net/core/filter.c b/net/core/filter.c
>> index 7c9653734fb60..72a7812f933a1 100644
>> --- a/net/core/filter.c
>> +++ b/net/core/filter.c
>> @@ -4241,6 +4241,7 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
>>   */
>>  void xdp_do_flush(void)
>>  {
>> +	guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
>>  	__dev_flush();
>>  	__cpu_map_flush();
>>  	__xsk_map_flush();
>> diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
>> index a94943681e5aa..74b88e897a7e3 100644
>> --- a/net/core/lwt_bpf.c
>> +++ b/net/core/lwt_bpf.c
>> @@ -44,6 +44,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
>>  	 * BPF prog and skb_do_redirect().
>>  	 */
>>  	local_bh_disable();
>> +	local_lock_nested_bh(&bpf_run_lock.redirect_lock);
>>  	bpf_compute_data_pointers(skb);
>>  	ret = bpf_prog_run_save_cb(lwt->prog, skb);
>>
>> @@ -76,6 +77,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
>>  		break;
>>  	}
>>
>> +	local_unlock_nested_bh(&bpf_run_lock.redirect_lock);
>>  	local_bh_enable();
>>
>>  	return ret;
>> diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
>> index 1976bd1639863..da61b99bc558f 100644
>> --- a/net/sched/cls_api.c
>> +++ b/net/sched/cls_api.c
>> @@ -23,6 +23,7 @@
>>  #include <linux/jhash.h>
>>  #include <linux/rculist.h>
>>  #include <linux/rhashtable.h>
>> +#include <linux/bpf.h>
>>  #include <net/net_namespace.h>
>>  #include <net/sock.h>
>>  #include <net/netlink.h>
>> @@ -3925,6 +3926,7 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
>>
>>  	fl = rcu_dereference_bh(qe->filter_chain);
>>
>> +	guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
>>  	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
>>  	case TC_ACT_SHOT:
>>  		qdisc_qstats_drop(sch);
>
> Here and in all other places this patch adds locks that
> will kill performance of XDP, tcx and everything else in networking.
>
> I'm surprised Jesper and other folks are not jumping in with nacks.
> We measure performance in nanoseconds here.
> Extra lock is no go.
> Please find a different way without ruining performance.

I'll add that while all this compiles out as no-ops on !PREEMPT_RT, I
do believe there are people who are using XDP on PREEMPT_RT kernels and
still expect decent performance. And to achieve that it is absolutely
imperative that we can amortise expensive operations (such as locking)
over multiple packets.
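To make the batching point a bit more concrete, here is a rough,
untested sketch of the kind of thing I mean: take the redirect_lock
from Sebastian's patch once per NAPI poll cycle instead of once per
packet, so the locking cost is paid once for the whole budget. The
helper name is made up and the placement is purely for illustration:

/* Rough sketch only, not part of the patch above. The idea is to pay
 * for bpf_run_lock.redirect_lock (the per-CPU local_lock added by
 * Sebastian's series) once per NAPI poll cycle, i.e. once per batch of
 * up to 'budget' packets, instead of once per packet in the fast path.
 * 'napi_poll_batched' is a made-up helper name.
 */
#include <linux/local_lock.h>
#include <linux/netdevice.h>

static int napi_poll_batched(struct napi_struct *n, int budget)
{
	int work;

	/* One lock/unlock pair covers every packet processed by this
	 * poll cycle.
	 */
	local_lock_nested_bh(&bpf_run_lock.redirect_lock);
	work = n->poll(n, budget);
	local_unlock_nested_bh(&bpf_run_lock.redirect_lock);

	return work;
}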
I realise there's a fundamental trade-off between the amount of
amortisation and the latency hit that we take from holding locks for
longer, but tuning the batch size (while still keeping some amount of
batching) may be a way forward? I suppose Jakub's suggestion in the
other part of the thread, of putting the locks around napi->poll(), is
a step towards something like this.

-Toke