Use bpf_prog_lock()/bpf_prog_unlock() in the BPF trampoline enter/exit
paths, instead of preemption disable/enable.

Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
---
 kernel/bpf/trampoline.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 7e89f1f49d77..52e39892fec4 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -195,8 +195,8 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
 	mutex_unlock(&trampoline_mutex);
 }
 
-/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and preempt that
- * are needed for trampoline. The macro is split into
+/* The logic is similar to BPF_PROG_RUN, but with explicit rcu and bpf
+ * prog lock that are needed for trampoline. The macro is split into
  * call _bpf_prog_enter
  * call prog->bpf_func
  * call __bpf_prog_exit
@@ -206,7 +206,7 @@ u64 notrace __bpf_prog_enter(void)
 	u64 start = 0;
 
 	rcu_read_lock();
-	preempt_disable();
+	bpf_prog_lock();
 	if (static_branch_unlikely(&bpf_stats_enabled_key))
 		start = sched_clock();
 	return start;
@@ -229,7 +229,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 		stats->nsecs += sched_clock() - start;
 		u64_stats_update_end(&stats->syncp);
 	}
-	preempt_enable();
+	bpf_prog_unlock();
 	rcu_read_unlock();
 }
-- 
2.20.1
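
For reference, bpf_prog_lock()/bpf_prog_unlock() are not defined by this
patch and are presumably introduced earlier in the series. A minimal
sketch of what such helpers might look like, assuming the intent is to
keep the old preempt_disable()/preempt_enable() semantics on non-RT
kernels while mapping to migrate_disable()/migrate_enable() on
PREEMPT_RT; the shape below is an assumption, not taken from this patch:

/* Hypothetical sketch, not from this patch: on PREEMPT_RT, pin the task
 * to its CPU without disabling preemption; elsewhere, fall back to the
 * existing preempt disable/enable behavior.
 */
#ifdef CONFIG_PREEMPT_RT
static inline void bpf_prog_lock(void)
{
	migrate_disable();
}

static inline void bpf_prog_unlock(void)
{
	migrate_enable();
}
#else
static inline void bpf_prog_lock(void)
{
	preempt_disable();
}

static inline void bpf_prog_unlock(void)
{
	preempt_enable();
}
#endif

With wrappers of this shape, rcu_read_lock() still keeps the prog alive
across the call, and the prog lock only has to guarantee that per-CPU
state touched while the prog runs (e.g. the stats updated in
__bpf_prog_exit() above) is not accessed from two CPUs at once.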