[PATCH RFC 43/43] x86/boot: Extend relocate range for PIE kernel image

Allow the PIE kernel image to be relocated into the unused holes in the
top 512G of the address space.
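
The top 512G currently contains two unused holes (see
Documentation/x86/x86_64/mm.rst). pie_randomize() splits each hole into
PUD-sized (1G) slots, counting one slot fewer than the hole size in 1G
units, and picks one slot at random; handle_relocations() then adds the
offset of that slot from __START_KERNEL_map to the relocation delta. As
a rough illustration of the slot arithmetic (the numbers follow from the
hole boundaries used in this patch, assuming a 1G PUD_SIZE):

	hole 0: 0xffffff8000000000 - 0xffffffeeffffffff: 444G/1G - 1 = 443 slots
	hole 1: 0xffffffff00000000 - 0xffffffffffffffff:   4G/1G - 1 =   3 slots
	                                                       total = 446 slots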

Suggested-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
Cc: Thomas Garnier <thgarnie@xxxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
---
 Documentation/x86/x86_64/mm.rst  |  4 +++
 arch/x86/Kconfig                 | 11 +++++++
 arch/x86/boot/compressed/kaslr.c | 55 ++++++++++++++++++++++++++++++++
 arch/x86/boot/compressed/misc.c  |  4 ++-
 arch/x86/boot/compressed/misc.h  |  9 ++++++
 5 files changed, 82 insertions(+), 1 deletion(-)

diff --git a/Documentation/x86/x86_64/mm.rst b/Documentation/x86/x86_64/mm.rst
index 35e5e18c83d0..b456501a5b69 100644
--- a/Documentation/x86/x86_64/mm.rst
+++ b/Documentation/x86/x86_64/mm.rst
@@ -149,6 +149,10 @@ Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
 
+Note that if CONFIG_EXTENDED_RANDOMIZE_BASE is enabled, the kernel image area,
+including the kernel image, module area and fixmap area, is randomized as a
+whole in the top 512G of the address space.
+
 Be very careful vs. KASLR when changing anything here. The KASLR address
 range must not overlap with anything except the KASAN shadow area, which is
 correct as KASAN disables KASLR.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9f8020991184..6d18d4333389 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2266,6 +2266,17 @@ config RANDOMIZE_BASE
 
 	  If unsure, say Y.
 
+config EXTENDED_RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image (PIE)"
+	default y
+	depends on X86_PIE && RANDOMIZE_BASE
+	help
+	  This packs the kernel image, module area and fixmap area
+	  together as a whole, and allows it to be randomized in the
+	  top 512G of the virtual address space when PIE is enabled.
+
+	  If unsure, say Y.
+
 # Relocation on x86 needs some additional build support
 config X86_NEED_RELOCS
 	def_bool y
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 454757fbdfe5..e0e092fe7fe2 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -871,3 +871,58 @@ void choose_random_location(unsigned long input,
 		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
 	*virt_addr = random_addr;
 }
+
+#ifdef CONFIG_EXTENDED_RANDOMIZE_BASE
+struct kernel_image_slot {
+	unsigned long start;
+	unsigned long end;
+	unsigned long pud_slots;
+};
+
+/*
+ * Currently, there are two unused holes in the top 512G (see
+ * Documentation/x86/x86_64/mm.rst); use them as the kernel image base.
+ */
+struct kernel_image_slot available_slots[] = {
+	{
+		.start = 0xffffff8000000000UL,
+		.end = 0xffffffeeffffffffUL,
+	},
+	{
+		.start = 0xffffffff00000000UL,
+		.end = 0xffffffffffffffffUL,
+	},
+};
+
+unsigned long pie_randomize(void)
+{
+	unsigned long total, slot;
+	int i;
+
+	if (cmdline_find_option_bool("nokaslr"))
+		return 0;
+
+	total = 0;
+	for (i = 0; i < ARRAY_SIZE(available_slots); i++) {
+		available_slots[i].pud_slots = (available_slots[i].end -
+						available_slots[i].start + 1UL) /
+						PUD_SIZE - 1UL;
+		total += available_slots[i].pud_slots;
+	}
+
+	slot = kaslr_get_random_long("PIE slot") % total;
+	for (i = 0; i < ARRAY_SIZE(available_slots); i++) {
+		if (slot < available_slots[i].pud_slots)
+			break;
+
+		slot -= available_slots[i].pud_slots;
+	}
+
+	if (i == ARRAY_SIZE(available_slots) || slot >= available_slots[i].pud_slots) {
+		warn("PIE randomize disabled: available slots are bad!");
+		return 0;
+	}
+
+	return (available_slots[i].start + slot * PUD_SIZE) - __START_KERNEL_map;
+}
+#endif
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 014ff222bf4b..e111b55edb8b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -210,8 +210,10 @@ static void handle_relocations(void *output, unsigned long output_len,
 	 * needed if KASLR has chosen a different starting address offset
 	 * from __START_KERNEL_map.
 	 */
-	if (IS_ENABLED(CONFIG_X86_64))
+	if (IS_ENABLED(CONFIG_X86_64)) {
 		delta = virt_addr - LOAD_PHYSICAL_ADDR;
+		delta += pie_randomize();
+	}
 
 	if (!delta) {
 		debug_putstr("No relocation needed... ");
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 2f155a0e3041..f50717092902 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -113,6 +113,15 @@ static inline void choose_random_location(unsigned long input,
 }
 #endif
 
+#ifdef CONFIG_EXTENDED_RANDOMIZE_BASE
+unsigned long pie_randomize(void);
+#else
+static inline unsigned long pie_randomize(void)
+{
+	return 0;
+}
+#endif
+
 /* cpuflags.c */
 bool has_cpuflag(int flag);
 
-- 
2.31.1