Re: [PATCH bpf-next] bpf: Prevent bpf program recursion for raw tracepoint probes

On 09/08, Jiri Olsa wrote:
We got a report from syzbot [1] about warnings caused by a bpf
program attached to the contention_begin raw tracepoint triggering
the same tracepoint by using the bpf_trace_printk helper, which
takes the trace_printk_lock lock.

  Call Trace:
   <TASK>
   ? trace_event_raw_event_bpf_trace_printk+0x5f/0x90
   bpf_trace_printk+0x2b/0xe0
   bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
   bpf_trace_run2+0x26/0x90
   native_queued_spin_lock_slowpath+0x1c6/0x2b0
   _raw_spin_lock_irqsave+0x44/0x50
   bpf_trace_printk+0x3f/0xe0
   bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
   bpf_trace_run2+0x26/0x90
   native_queued_spin_lock_slowpath+0x1c6/0x2b0
   _raw_spin_lock_irqsave+0x44/0x50
   bpf_trace_printk+0x3f/0xe0
   bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
   bpf_trace_run2+0x26/0x90
   native_queued_spin_lock_slowpath+0x1c6/0x2b0
   _raw_spin_lock_irqsave+0x44/0x50
   bpf_trace_printk+0x3f/0xe0
   bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
   bpf_trace_run2+0x26/0x90
   native_queued_spin_lock_slowpath+0x1c6/0x2b0
   _raw_spin_lock_irqsave+0x44/0x50
   __unfreeze_partials+0x5b/0x160
   ...

This can be reproduced by attaching a bpf program to the
contention_begin raw tracepoint; the bpf program calls the
bpf_trace_printk helper. Running perf bench then forces the spin
lock code to take the slowpath and call the contention_begin
tracepoint.
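
For illustration, the triggering program is essentially of this shape
(a minimal sketch assuming standard libbpf headers, not the exact
reproducer from the report):

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: attached to contention_begin, it calls bpf_printk(),
 * which takes trace_printk_lock; once that lock itself becomes
 * contended, contention_begin fires again and re-enters this program.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tp/contention_begin")
int prog(struct bpf_raw_tracepoint_args *ctx)
{
	bpf_printk("lock contention");
	return 0;
}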

Fix this by skipping execution of the bpf program if it's already
running, using the bpf prog 'active' field, which is currently used
by trampoline programs for the same reason.

Makes sense to me and seems to address Alexei's earlier point
about bpf_prog_active.

Reviewed-by: Stanislav Fomichev <sdf@xxxxxxxxxx>

Reported-by: syzbot+2251879aa068ad9c960d@xxxxxxxxxxxxxxxxxxxxxxxxx
[1] https://lore.kernel.org/bpf/YxhFe3EwqchC%2FfYf@krava/T/#t
Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
  include/linux/bpf.h      | 1 +
  kernel/bpf/trampoline.c  | 6 +++---
  kernel/trace/bpf_trace.c | 6 ++++++
  3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 48ae05099f36..4737bd0fcbb8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2640,4 +2640,5 @@ static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
  static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
  #endif /* CONFIG_BPF_LSM */

+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
  #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index ad76940b02cc..a098bdc33209 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -863,7 +863,7 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
  	return start;
  }

-static void notrace inc_misses_counter(struct bpf_prog *prog)
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
  {
  	struct bpf_prog_stats *stats;
  	unsigned int flags;
@@ -896,7 +896,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
  	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

  	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-		inc_misses_counter(prog);
+		bpf_prog_inc_misses_counter(prog);
  		return 0;
  	}
  	return bpf_prog_start_time();
@@ -967,7 +967,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
  	might_fault();

  	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-		inc_misses_counter(prog);
+		bpf_prog_inc_misses_counter(prog);
  		return 0;
  	}

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 68e5cdd24cef..c8cd1aa7b112 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2042,9 +2042,15 @@ static __always_inline
  void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
  {
  	cant_sleep();
+	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+		bpf_prog_inc_misses_counter(prog);
+		goto out;
+	}
  	rcu_read_lock();
  	(void) bpf_prog_run(prog, args);
  	rcu_read_unlock();
+out:
+	this_cpu_dec(*(prog->active));
  }

  #define UNPACK(...)			__VA_ARGS__
--
2.37.3



