[PATCH 2/2] Adding BPF CFI

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Tenut <tenut@Niobium>
Subject: [PATCH 2/2] Adding BPF CFI

Check offset of BPF instructions in the interpreter to make sure the BPF program is executed from the correct starting point

Signed-off-by: Maxwell Bland <mbland@xxxxxxxxxxxx>
---
 kernel/bpf/Kconfig | 10 +++++++
 kernel/bpf/core.c  | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)

diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index 7160dcaaa58a..9c64db0ddd63 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -94,6 +94,7 @@ config BPF_HARDENING
 	help
 	  Enhance bpf interpreter's security
 
+if BPF_HARDENING
 config BPF_NX
 bool "Enable bpf NX"
 	depends on BPF_HARDENING && !DYNAMIC_MEMORY_LAYOUT
@@ -102,6 +103,15 @@ bool "Enable bpf NX"
 	  Allocate eBPF programs in seperate area and make sure the
 	  interpreted programs are in the region.
 
+config BPF_CFI
+	bool "Enable bpf CFI"
+	depends on BPF_NX
+	default n
+	help
+	  Enable alignment checks for eBPF program starting points
+
+endif
+
 source "kernel/bpf/preload/Kconfig"
 
 config BPF_LSM
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 56d9e8d4a6de..dee0d2713c3b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -116,6 +116,75 @@ static void bpf_insn_check_range(const struct bpf_insn *insn)
 }
 #endif /* CONFIG_BPF_NX */
 
+#ifdef CONFIG_BPF_CFI
+#define BPF_ON  1
+#define BPF_OFF 0
+
+struct bpf_mode_flag {
+	u8 byte_array[PAGE_SIZE];
+};
+DEFINE_PER_CPU_PAGE_ALIGNED(struct bpf_mode_flag, bpf_exec_mode);
+
+static void __init lock_bpf_exec_mode(void)
+{
+	struct bpf_mode_flag *flag_page;
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		flag_page = per_cpu_ptr(&bpf_exec_mode, cpu);
+		set_memory_ro((unsigned long)flag_page, 1);
+	};
+}
+subsys_initcall(lock_bpf_exec_mode);
+
+static void write_cr0_nocheck(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
+}
+
+/*
+ * Notice that get_cpu_var also disables preemption so no
+ * extra care needed for that.
+ */
+static void enter_bpf_exec_mode(unsigned long *flagsp)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = &get_cpu_var(bpf_exec_mode);
+	local_irq_save(*flagsp);
+	write_cr0_nocheck(read_cr0() & ~X86_CR0_WP);
+	flag_page->byte_array[0] = BPF_ON;
+	write_cr0_nocheck(read_cr0() | X86_CR0_WP);
+}
+
+static void leave_bpf_exec_mode(unsigned long *flagsp)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = this_cpu_ptr(&bpf_exec_mode);
+	write_cr0_nocheck(read_cr0() & ~X86_CR0_WP);
+	flag_page->byte_array[0] = BPF_OFF;
+	write_cr0_nocheck(read_cr0() | X86_CR0_WP);
+	local_irq_restore(*flagsp);
+	put_cpu_var(bpf_exec_mode);
+}
+
+static void check_bpf_exec_mode(void)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = this_cpu_ptr(&bpf_exec_mode);
+	BUG_ON(flag_page->byte_array[0] != BPF_ON);
+}
+
+static void bpf_check_cfi(const struct bpf_insn *insn)
+{
+	const struct bpf_prog *fp;
+	fp = container_of(insn, struct bpf_prog, insnsi[0]);
+	if (!IS_ALIGNED((unsigned long)fp, BPF_MEMORY_ALIGN))
+		BUG();
+}
+
+#else /* CONFIG_BPF_CFI */
+static void check_bpf_exec_mode(void) {}
+#endif /* CONFIG_BPF_CFI */
+
 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 {
 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
@@ -1719,11 +1788,18 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 #undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
 
+#ifdef CONFIG_BPF_CFI
+	unsigned long flags;
+	enter_bpf_exec_mode(&flags);
+	bpf_check_cfi(insn);
+#endif
+
 #define CONT	 ({ insn++; goto select_insn; })
 #define CONT_JMP ({ insn++; goto select_insn; })
 
 select_insn:
 	bpf_insn_check_range(insn);
+	check_bpf_exec_mode();
 	goto *jumptable[insn->code];
 
 	/* Explicitly mask the register-based shift amounts with 63 or 31
@@ -2034,6 +2110,9 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 		insn += insn->imm;
 		CONT;
 	JMP_EXIT:
+#ifdef CONFIG_BPF_CFI
+		leave_bpf_exec_mode(&flags);
+#endif
 		return BPF_R0;
 	/* JMP */
 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\




[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux