[PATCH bpf-next v4 2/5] bpf: Introduce bpf_jit_supports_tail_call_cnt_ptr()

In order to store tail_call_cnt on the stack of the bpf prog's caller,
introduce bpf_tail_call_run_ctx as a new ctx for the bpf prog, which
wraps the original ctx together with a pointer to tail_call_cnt.
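
For illustration, a caller that opts in wraps the original ctx roughly
like the following sketch, which mirrors the __bpf_prog_run() change in
the filter.h hunk of this patch (all names come from the diff below):

	u32 ret, tail_call_cnt = 0;
	struct bpf_tail_call_run_ctx tail_call_run_ctx = {};
	const void *run_ctx;

	tail_call_run_ctx.ctx = ctx;
	tail_call_run_ctx.tail_call_cnt_ptr = &tail_call_cnt;
	run_ctx = prog->aux->use_tail_call_run_ctx ? &tail_call_run_ctx : ctx;

	ret = dfunc(run_ctx, prog->insnsi, prog->bpf_func);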

To avoid breaking progs at run time, introduce use_tail_call_run_ctx
in prog->aux in order to determine whether to use bpf_tail_call_run_ctx
before calling the bpf prog. This flag is set at load time when
prog->aux->tail_call_reachable is true, the prog is jited, and the arch
supports bpf_jit_supports_tail_call_cnt_ptr(). Thereafter, the prog's
prologue has to cache tail_call_cnt_ptr and restore the original ctx at
the same time.
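
In C-like pseudocode, the prologue of such a prog would conceptually do
the following (a rough sketch only; the real logic is emitted as
arch-specific JIT assembly in a later patch of this series, and
"arg1"/"tcc_ptr" are illustrative names, not identifiers from this
patch):

	/* On entry, the first argument points at the wrapper ctx. */
	struct bpf_tail_call_run_ctx *run_ctx = arg1;

	/* Cache the tail call count pointer for tail call handling. */
	tcc_ptr = run_ctx->tail_call_cnt_ptr;

	/* Restore the original ctx before the prog body runs. */
	arg1 = run_ctx->ctx;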

Signed-off-by: Leon Hwang <hffilwlqm@xxxxxxxxx>
---
 include/linux/bpf.h    |  6 ++++++
 include/linux/filter.h | 13 ++++++++++---
 kernel/bpf/core.c      | 19 +++++++++++++++++++
 3 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 90094400cc63d..95888700966f7 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1466,6 +1466,7 @@ struct bpf_prog_aux {
 	bool attach_tracing_prog; /* true if tracing another tracing program */
 	bool func_proto_unreliable;
 	bool tail_call_reachable;
+	bool use_tail_call_run_ctx;
 	bool xdp_has_frags;
 	bool exception_cb;
 	bool exception_boundary;
@@ -2047,6 +2048,11 @@ struct bpf_trace_run_ctx {
 	bool is_uprobe;
 };
 
+struct bpf_tail_call_run_ctx {
+	const void *ctx;
+	u32 *tail_call_cnt_ptr;
+};
+
 struct bpf_tramp_run_ctx {
 	struct bpf_run_ctx run_ctx;
 	u64 bpf_cookie;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 7a27f19bf44d0..f8e9d5e3da11f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -671,7 +671,13 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 					  const void *ctx,
 					  bpf_dispatcher_fn dfunc)
 {
-	u32 ret;
+	struct bpf_tail_call_run_ctx tail_call_run_ctx = {};
+	u32 ret, tail_call_cnt = 0;
+	const void *run_ctx;
+
+	tail_call_run_ctx.ctx = ctx;
+	tail_call_run_ctx.tail_call_cnt_ptr = &tail_call_cnt;
+	run_ctx = prog->aux->use_tail_call_run_ctx ? &tail_call_run_ctx : ctx;
 
 	cant_migrate();
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
@@ -679,7 +685,7 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 		u64 duration, start = sched_clock();
 		unsigned long flags;
 
-		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+		ret = dfunc(run_ctx, prog->insnsi, prog->bpf_func);
 
 		duration = sched_clock() - start;
 		stats = this_cpu_ptr(prog->stats);
@@ -688,7 +694,7 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
 		u64_stats_add(&stats->nsecs, duration);
 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
 	} else {
-		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+		ret = dfunc(run_ctx, prog->insnsi, prog->bpf_func);
 	}
 	return ret;
 }
@@ -994,6 +1000,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_tail_call_cnt_ptr(void);
 bool bpf_jit_supports_percpu_insn(void);
 bool bpf_jit_supports_kfunc_call(void);
 bool bpf_jit_supports_far_kfunc_call(void);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 99b8b1c9a248c..3fad4d973b820 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2358,6 +2358,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	return ret;
 }
 
+static void bpf_check_tail_call_run_ctx(struct bpf_prog *fp)
+{
+	if (fp->aux->tail_call_reachable && fp->jited &&
+	    bpf_jit_supports_tail_call_cnt_ptr())
+		fp->aux->use_tail_call_run_ctx = true;
+}
+
 static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
@@ -2430,6 +2437,10 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * all eBPF JITs might immediately support all features.
 	 */
 	*err = bpf_check_tail_call(fp);
+	if (*err)
+		return fp;
+
+	bpf_check_tail_call_run_ctx(fp);
 
 	return fp;
 }
@@ -2941,6 +2952,14 @@ bool __weak bpf_jit_needs_zext(void)
 	return false;
 }
 
+/* Return TRUE if the JIT backend supports tail call count pointer in tailcall
+ * context.
+ */
+bool __weak bpf_jit_supports_tail_call_cnt_ptr(void)
+{
+	return false;
+}
+
 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
 bool __weak bpf_jit_supports_subprog_tailcalls(void)
 {
-- 
2.44.0