[PATCH bpf-next 1/3] bpf: support nocsr patterns for calls to kfuncs

Recognize nocsr patterns around kfunc calls.
For example, suppose bpf_cast_to_kern_ctx() follows the nocsr contract
(which it does: it is rewritten by the verifier as an "r0 = r1" insn).
In such a case, rewrite the BPF program below:

  r2 = 1;
  *(u64 *)(r10 - 32) = r2;
  call %[bpf_cast_to_kern_ctx];
  r2 = *(u64 *)(r10 - 32);
  r0 = r2;

by removing the spill/fill pair:

  r2 = 1;
  call %[bpf_cast_to_kern_ctx];
  r0 = r2;
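
Such spill/fill pairs are emitted by clang around calls to functions
marked with the no_caller_saved_registers attribute: registers are
allocated as if the call scratches nothing, and live r0-r5 are
spilled/filled around the call site. A hypothetical declaration sketch
(illustrative only, not part of this patch):

  extern void *bpf_cast_to_kern_ctx(void *obj)
      __attribute__((no_caller_saved_registers)) __ksym;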

Add a KF_NOCSR flag to mark kfuncs that follow the nocsr contract.
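
A later patch in the series would presumably mark eligible kfuncs in
their BTF ID set registration, along these lines (illustrative only,
assuming the existing registration of bpf_cast_to_kern_ctx in
kernel/bpf/helpers.c):

  BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx, KF_NOCSR)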

Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
---
 include/linux/btf.h   |  1 +
 kernel/bpf/verifier.c | 36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/include/linux/btf.h b/include/linux/btf.h
index cffb43133c68..59ca37300423 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -75,6 +75,7 @@
 #define KF_ITER_NEXT    (1 << 9) /* kfunc implements BPF iter next method */
 #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
 #define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */
+#define KF_NOCSR        (1 << 12) /* kfunc follows nocsr calling contract */
 
 /*
  * Tag marking a kernel function as a kfunc. This is meant to minimize the
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index df3be12096cf..c579f74be3f9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -16140,6 +16140,28 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
 	}
 }
 
+/* Same as helper_nocsr_clobber_mask() but for kfuncs, see comment above */
+static u32 kfunc_nocsr_clobber_mask(struct bpf_kfunc_call_arg_meta *meta)
+{
+	const struct btf_param *params;
+	u32 vlen, i, mask;
+
+	params = btf_params(meta->func_proto);
+	vlen = btf_type_vlen(meta->func_proto);
+	mask = 0;
+	if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type)))
+		mask |= BIT(BPF_REG_0);
+	for (i = 0; i < vlen; ++i)
+		mask |= BIT(BPF_REG_1 + i);
+	return mask;
+}
+
+/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
+static bool verifier_inlines_kfunc_call(struct bpf_kfunc_call_arg_meta *meta)
+{
+	return false;
+}
+
 /* GCC and LLVM define a no_caller_saved_registers function attribute.
  * This attribute means that function scratches only some of
  * the caller saved registers defined by ABI.
@@ -16238,6 +16260,20 @@ static void mark_nocsr_pattern_for_call(struct bpf_verifier_env *env,
 				  bpf_jit_inlines_helper_call(call->imm));
 	}
 
+	if (bpf_pseudo_kfunc_call(call)) {
+		struct bpf_kfunc_call_arg_meta meta;
+		int err;
+
+		err = fetch_kfunc_meta(env, call, &meta, NULL);
+		if (err < 0)
+			/* error would be reported later */
+			return;
+
+		clobbered_regs_mask = kfunc_nocsr_clobber_mask(&meta);
+		can_be_inlined = (meta.kfunc_flags & KF_NOCSR) &&
+				 verifier_inlines_kfunc_call(&meta);
+	}
+
 	if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
 		return;
 
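As a worked example (not part of the diff above, assuming the in-tree
prototype void *bpf_cast_to_kern_ctx(void *obj)): the kfunc has a
non-void return type and a single argument, so kfunc_nocsr_clobber_mask()
computes

  /* non-void return clobbers r0; one argument clobbers r1 */
  mask = BIT(BPF_REG_0) | BIT(BPF_REG_1);

r2 is outside this mask, hence the spill/fill pair at (r10 - 32) in the
commit message example forms a removable nocsr pattern.
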
-- 
2.45.2