[PATCH v1 bpf-next 8/9] bpf: add BPF_STATIC_BRANCH_UPDATE syscall

Add a new bpf system call, BPF_STATIC_BRANCH_UPDATE, which allows users to
update static branches in BPF programs. The system call is invoked as

    bpf(BPF_STATIC_BRANCH_UPDATE, attrs={prog_fd, insn_off, on})

where prog_fd identifies a BPF program, insn_off is an _xlated_ offset
within this program, and on is a boolean value that turns the branch on or off.
The instruction at insn_off must be a JA with SRC_REG or'ed with
BPF_STATIC_BRANCH_JA and, optionally, with BPF_STATIC_BRANCH_NOP.
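
For illustration, a minimal userspace sketch of the call (the wrapper name
is hypothetical; it assumes only libc's syscall(2) and uapi headers that
include the additions from this patch):

    #include <stdbool.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Hypothetical helper: toggle the static branch at the given
     * xlated instruction offset of the program behind prog_fd.
     */
    static int static_branch_update(int prog_fd, __u32 insn_off, bool on)
    {
            union bpf_attr attr;

            /* CHECK_ATTR() requires all unused attr bytes to be zero */
            memset(&attr, 0, sizeof(attr));
            attr.static_branch.prog_fd = prog_fd;
            attr.static_branch.insn_off = insn_off; /* xlated, not jitted */
            attr.static_branch.on = on ? 1 : 0;     /* other bits -> -EINVAL */

            return syscall(__NR_bpf, BPF_STATIC_BRANCH_UPDATE,
                           &attr, sizeof(attr));
    }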

To implement this for a particular architecture, override the weak
bpf_arch_poke_static_branch() function in the corresponding bpf_jit_comp.c.
This patch adds the x86 implementation.
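
Other ports would follow the same shape. Purely as a hypothetical, untested
sketch, an arm64 override might look as below; it assumes the arm64 JIT
fills in xlated_to_jit[].ip and .jmp_offset analogously to the x86 JIT in
this patch, with jmp_offset taken relative to the branch instruction itself:

    /* arch/arm64/net/bpf_jit_comp.c -- hypothetical sketch only,
     * using the existing insn helpers from <asm/insn.h>
     */
    int bpf_arch_poke_static_branch(struct bpf_prog *prog, u32 insn_off,
                                    bool on)
    {
            void *ip = prog->aux->xlated_to_jit[insn_off].ip;
            s32 jmp_offset = prog->aux->xlated_to_jit[insn_off].jmp_offset;
            u32 insn;

            /* arm64 instructions are a fixed 4 bytes, so the branch slot
             * holds either a single B <target> or a single NOP.
             */
            if (on)
                    insn = aarch64_insn_gen_branch_imm((unsigned long)ip,
                                    (unsigned long)ip + jmp_offset,
                                    AARCH64_INSN_BRANCH_NOLINK);
            else
                    insn = aarch64_insn_gen_nop();

            return aarch64_insn_patch_text_nosync(ip, insn);
    }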

Signed-off-by: Anton Protopopov <aspsk@xxxxxxxxxxxxx>
---
 arch/x86/net/bpf_jit_comp.c    | 40 +++++++++++++++++++++++
 include/linux/bpf.h            |  2 ++
 include/linux/filter.h         |  1 +
 include/uapi/linux/bpf.h       |  7 ++++
 kernel/bpf/core.c              |  5 +++
 kernel/bpf/syscall.c           | 60 ++++++++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  7 ++++
 7 files changed, 122 insertions(+)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b291b5c79d26..2090713e4126 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2099,8 +2099,17 @@ st:			if (is_imm8(insn->off))
 				if (func_idx)
 					off += bpf_prog->aux->func_info[func_idx].insn_off;
 
+				bpf_prog->aux->xlated_to_jit[off].ip = image + proglen;
 				bpf_prog->aux->xlated_to_jit[off].off = proglen;
 				bpf_prog->aux->xlated_to_jit[off].len = ilen;
+
+				/*
+				 * Save the offset so that it can later be accessed
+				 * by the bpf(BPF_STATIC_BRANCH_UPDATE) syscall
+				 */
+				if (insn->code == (BPF_JMP | BPF_JA) ||
+				    insn->code == (BPF_JMP32 | BPF_JA))
+					bpf_prog->aux->xlated_to_jit[off].jmp_offset = jmp_offset;
 			}
 		}
 		proglen += ilen;
@@ -3276,3 +3285,34 @@ bool bpf_jit_supports_ptr_xchg(void)
 {
 	return true;
 }
+
+int bpf_arch_poke_static_branch(struct bpf_prog *prog,
+				u32 insn_off,
+				bool on)
+{
+	int jmp_offset = prog->aux->xlated_to_jit[insn_off].jmp_offset;
+	u32 len = prog->aux->xlated_to_jit[insn_off].len;
+	u8 op[5];
+
+	if (WARN_ON_ONCE(is_imm8(jmp_offset) && len != 2))
+		return -EINVAL;
+
+	if (WARN_ON_ONCE(!is_imm8(jmp_offset) && len != 5))
+		return -EINVAL;
+
+	if (on) {
+		if (len == 2) {
+			op[0] = 0xEB;
+			op[1] = jmp_offset;
+		} else {
+			op[0] = 0xE9;
+			memcpy(&op[1], &jmp_offset, 4);
+		}
+	} else {
+		memcpy(op, x86_nops[len], len);
+	}
+
+	text_poke_bp(prog->aux->xlated_to_jit[insn_off].ip, op, len, NULL);
+
+	return 0;
+}
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bdd6be718e82..1363b1fc8c09 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1528,8 +1528,10 @@ struct bpf_prog_aux {
 	 * instructions, if allocated
 	 */
 	struct {
+		void *ip;	/* the address of the jitted insn */
 		u32 off;	/* local offset in the jitted code */
 		u32 len;	/* the total len of generated jit code */
+		u32 jmp_offset;	/* jitted jump offset for BPF_JA insns */
 	} *xlated_to_jit;
 };
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fee070b9826e..0dad44fa3af2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -957,6 +957,7 @@ bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
 bool bpf_jit_supports_ptr_xchg(void);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+int bpf_arch_poke_static_branch(struct bpf_prog *prog, u32 off, bool on);
 bool bpf_helper_changes_pkt_data(void *func);
 
 static inline bool bpf_dump_raw_ok(const struct cred *cred)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index aca5ed065731..8aafb0eddd1c 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -932,6 +932,7 @@ enum bpf_cmd {
 	BPF_LINK_DETACH,
 	BPF_PROG_BIND_MAP,
 	BPF_TOKEN_CREATE,
+	BPF_STATIC_BRANCH_UPDATE,
 	__MAX_BPF_CMD,
 };
 
@@ -1787,6 +1788,12 @@ union bpf_attr {
 		__u32		bpffs_fd;
 	} token_create;
 
+	struct { /* struct used by BPF_STATIC_BRANCH_UPDATE command */
+		__u32		prog_fd;
+		__u32		insn_off;
+		__u32		on;
+	} static_branch;
+
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 8e99c1563a7f..fec185354ea3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -3042,6 +3042,11 @@ static int __init bpf_global_ma_init(void)
 late_initcall(bpf_global_ma_init);
 #endif
 
+int __weak bpf_arch_poke_static_branch(struct bpf_prog *prog, u32 off, bool on)
+{
+	return -EOPNOTSUPP;
+}
+
 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 EXPORT_SYMBOL(bpf_stats_enabled_key);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 36b8fdcfba75..9e2e12a0bdfe 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1540,6 +1540,63 @@ static int map_lookup_elem(union bpf_attr *attr)
 	return err;
 }
 
+static int parse_static_branch_insn(struct bpf_insn *insn, bool *inverse)
+{
+	__u8 code = insn->code;
+
+	if (code != (BPF_JMP | BPF_JA) && code != (BPF_JMP32 | BPF_JA))
+		return -EINVAL;
+
+	if (insn->src_reg & ~BPF_STATIC_BRANCH_MASK)
+		return -EINVAL;
+
+	if (!(insn->src_reg & BPF_STATIC_BRANCH_JA))
+		return -EINVAL;
+
+	if (insn->dst_reg)
+		return -EINVAL;
+
+	*inverse = !(insn->src_reg & BPF_STATIC_BRANCH_NOP);
+
+	return 0;
+}
+
+#define BPF_STATIC_BRANCH_UPDATE_LAST_FIELD static_branch.on
+
+static int bpf_static_branch_update(union bpf_attr *attr)
+{
+	bool on = attr->static_branch.on & 1;
+	struct bpf_prog *prog;
+	u32 insn_off;
+	bool inverse;
+	int ret;
+
+	if (CHECK_ATTR(BPF_STATIC_BRANCH_UPDATE))
+		return -EINVAL;
+
+	if (attr->static_branch.on & ~1)
+		return -EINVAL;
+
+	prog = bpf_prog_get(attr->static_branch.prog_fd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	insn_off = attr->static_branch.insn_off;
+	if (insn_off >= prog->len) {
+		ret = -ERANGE;
+		goto put_prog;
+	}
+
+	ret = parse_static_branch_insn(&prog->insnsi[insn_off], &inverse);
+	if (ret)
+		goto put_prog;
+
+	ret = bpf_arch_poke_static_branch(prog, insn_off, on ^ inverse);
+
+put_prog:
+	bpf_prog_put(prog);
+	return ret;
+}
 
 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
 
@@ -5694,6 +5751,9 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
 	case BPF_MAP_DELETE_BATCH:
 		err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
 		break;
+	case BPF_STATIC_BRANCH_UPDATE:
+		err = bpf_static_branch_update(&attr);
+		break;
 	case BPF_LINK_CREATE:
 		err = link_create(&attr, uattr);
 		break;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index aca5ed065731..8aafb0eddd1c 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -932,6 +932,7 @@ enum bpf_cmd {
 	BPF_LINK_DETACH,
 	BPF_PROG_BIND_MAP,
 	BPF_TOKEN_CREATE,
+	BPF_STATIC_BRANCH_UPDATE,
 	__MAX_BPF_CMD,
 };
 
@@ -1787,6 +1788,12 @@ union bpf_attr {
 		__u32		bpffs_fd;
 	} token_create;
 
+	struct { /* struct used by BPF_STATIC_BRANCH_UPDATE command */
+		__u32		prog_fd;
+		__u32		insn_off;
+		__u32		on;
+	} static_branch;
+
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
-- 
2.34.1