The required protection is that the caller cannot be migrated to a
different CPU as these functions end up in places which either take a
hash bucket lock or might trigger a kprobe inside the memory allocator.
Both scenarios can lead to deadlocks. The deadlock prevention is per CPU:
incrementing a per-CPU variable temporarily blocks the invocation of BPF
programs from perf and kprobes.

Replace the preempt_disable/enable() pairs with migrate_disable/enable()
pairs to prepare BPF to work on PREEMPT_RT enabled kernels. On a non-RT
kernel this maps to preempt_disable/enable(), i.e. no functional change.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 kernel/bpf/syscall.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -174,7 +174,7 @@ static int bpf_map_update_value(struct b
 	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
 	 * inside bpf map update or delete otherwise deadlocks are possible
 	 */
-	preempt_disable();
+	migrate_disable();
 	__this_cpu_inc(bpf_prog_active);
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@@ -207,7 +207,7 @@ static int bpf_map_update_value(struct b
 		rcu_read_unlock();
 	}
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	migrate_enable();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -222,7 +222,7 @@ static int bpf_map_copy_value(struct bpf
 	if (bpf_map_is_dev_bound(map))
 		return bpf_map_offload_lookup_elem(map, key, value);
 
-	preempt_disable();
+	migrate_disable();
 	this_cpu_inc(bpf_prog_active);
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@@ -269,7 +269,7 @@ static int bpf_map_copy_value(struct bpf
 	}
 
 	this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	migrate_enable();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -1136,13 +1136,13 @@ static int map_delete_elem(union bpf_att
 		goto out;
 	}
 
-	preempt_disable();
+	migrate_disable();
 	__this_cpu_inc(bpf_prog_active);
 	rcu_read_lock();
 	err = map->ops->map_delete_elem(map, key);
 	rcu_read_unlock();
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	migrate_enable();
 	maybe_wait_bpf_programs(map);
 out:
 	kfree(key);
@@ -1254,13 +1254,13 @@ int generic_map_delete_batch(struct bpf_
 			break;
 		}
 
-		preempt_disable();
+		migrate_disable();
 		__this_cpu_inc(bpf_prog_active);
 		rcu_read_lock();
 		err = map->ops->map_delete_elem(map, key);
 		rcu_read_unlock();
 		__this_cpu_dec(bpf_prog_active);
-		preempt_enable();
+		migrate_enable();
 		maybe_wait_bpf_programs(map);
 		if (err)
 			break;
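
For reference, all four call sites share the same shape once lifted out of
the diff context. The sketch below is illustrative only, not code to apply:
do_map_op() is a hypothetical stand-in for the map->ops->map_*_elem()
callbacks, and the comments spell out why migrate_disable() is sufficient
here.

DEFINE_PER_CPU(int, bpf_prog_active);	/* as defined in kernel/bpf/syscall.c */

static int guarded_map_op(struct bpf_map *map, void *key)
{
	int err;

	/*
	 * Pin the task to the current CPU so the per-CPU counter below
	 * stays consistent. Unlike preempt_disable(), this leaves the
	 * section preemptible, which PREEMPT_RT requires because the
	 * hash bucket locks and the allocator locks become sleeping locks.
	 */
	migrate_disable();
	__this_cpu_inc(bpf_prog_active);

	rcu_read_lock();
	err = do_map_op(map, key);	/* hypothetical stand-in */
	rcu_read_unlock();

	__this_cpu_dec(bpf_prog_active);
	migrate_enable();
	return err;
}

The counter is effective because the tracing entry path, trace_call_bpf(),
bails out when __this_cpu_inc_return(bpf_prog_active) != 1, so a BPF
program attached to a perf event or kprobe that fires on this CPU cannot
recurse into the map code while the counter is elevated.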