[RFC PATCH 26/28] x86/boot: Implement support for ELF RELA/RELR relocations

From: Ard Biesheuvel <ardb@xxxxxxxxxx>

Add support for standard dynamic ELF relocations to perform the virtual
relocation of the core kernel at boot. The RELR format results in a 10x
reduction in the memory footprint of the relocation data, and can be
generated by the linker directly. This removes the need for
a) a host tool 'relocs' and a bespoke, clunky relocation table format
   where the table is simply concatenated to the vmlinux payload when
   building the decompressor;
b) dependence on the --emit-relocs linker switch, which dumps static,
   intermediate build-time relocations into the ELF binary, to be
   subsequently used as runtime relocations.

The latter is especially problematic, as linkers may apply relaxations
that result in the code going out of sync with the static relocation
that annotated it in the input. This requires additional work on the
part of the linker to update the static relocation, which is not even
possible in all cases. Therefore, it is much better to consume a
runtime, dynamic relocation format in the way it was intended.

This will require switching to linking vmlinux in PIE mode - this is
implemented in a subsequent patch.
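
For illustration only (this sketch is not part of the patch): the footprint
difference between RELA and RELR comes from the encoding. A 64-bit RELA entry
spends 24 bytes (r_offset, r_info, r_addend) on every relocation, whereas RELR
emits an 8-byte address word followed by 8-byte bitmap words that each cover
the next 63 pointer-sized slots. The numbers below assume a dense run of
relative relocations; the 10x figure quoted above is the whole-image number,
and the exact ratio depends on how densely the relocated slots cluster.

	/* Standalone illustration, not kernel code. */
	#include <stddef.h>
	#include <stdio.h>

	int main(void)
	{
		/* 64 R_X86_64_RELATIVE relocations at consecutive 8-byte slots */
		size_t count = 64;

		/* RELA: one 24-byte Elf64_Rela (r_offset, r_info, r_addend) each */
		size_t rela_bytes = count * 24;

		/*
		 * RELR: one 8-byte address word for the first slot, plus one
		 * 8-byte bitmap word whose 63 bits cover the remaining slots.
		 */
		size_t relr_bytes = 8 + 8;

		printf("RELA: %zu bytes, RELR: %zu bytes\n", rela_bytes, relr_bytes);
		return 0;
	}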

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 Documentation/arch/x86/zero-page.rst  |  3 +-
 arch/x86/Kconfig                      |  1 +
 arch/x86/include/asm/setup.h          |  1 +
 arch/x86/include/uapi/asm/bootparam.h |  2 +-
 arch/x86/kernel/head64.c              | 36 ++++++++++++++++++++
 arch/x86/kernel/head_64.S             |  5 +++
 arch/x86/kernel/vmlinux.lds.S         | 24 +++++++++----
 7 files changed, 64 insertions(+), 8 deletions(-)

diff --git a/Documentation/arch/x86/zero-page.rst b/Documentation/arch/x86/zero-page.rst
index 45aa9cceb4f1..fd18b77113e2 100644
--- a/Documentation/arch/x86/zero-page.rst
+++ b/Documentation/arch/x86/zero-page.rst
@@ -3,7 +3,7 @@
 =========
 Zero Page
 =========
-The additional fields in struct boot_params as a part of 32-bit boot
+The additional fields in struct boot_params as a part of 32/64-bit boot
 protocol of kernel. These should be filled by bootloader or 16-bit
 real-mode setup code of the kernel. References/settings to it mainly
 are in::
@@ -20,6 +20,7 @@ Offset/Size	Proto	Name			Meaning
 060/010		ALL	ist_info		Intel SpeedStep (IST) BIOS support information
 						(struct ist_info)
 070/008		ALL	acpi_rsdp_addr		Physical address of ACPI RSDP table
+078/008		64-bit	kaslr_va_shift		Virtual kASLR displacement of the core kernel
 080/010		ALL	hd0_info		hd0 disk parameter, OBSOLETE!!
 090/010		ALL	hd1_info		hd1 disk parameter, OBSOLETE!!
 0A0/010		ALL	sys_desc_table		System description table (struct sys_desc_table),
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2852fcd82cbd..54cb1f14218b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86_64
 	depends on 64BIT
 	# Options that are inherently 64-bit kernel only:
 	select ARCH_HAS_GIGANTIC_PAGE
+	select ARCH_HAS_RELR
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 85f4fde3515c..a4d7dd81f773 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -51,6 +51,7 @@ extern void reserve_standard_io_resources(void);
 extern void i386_reserve_resources(void);
 extern unsigned long __startup_64(unsigned long p2v_offset, struct boot_params *bp);
 extern void startup_64_setup_gdt_idt(void);
+extern void startup_64_apply_relocations(struct boot_params *bp);
 extern void early_setup_idt(void);
 extern void __init do_early_exception(struct pt_regs *regs, int trapnr);
 
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 9b82eebd7add..3389b1be234c 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -120,7 +120,7 @@ struct boot_params {
 	__u64  tboot_addr;				/* 0x058 */
 	struct ist_info ist_info;			/* 0x060 */
 	__u64 acpi_rsdp_addr;				/* 0x070 */
-	__u8  _pad3[8];					/* 0x078 */
+	__u64 kaslr_va_shift;				/* 0x078 */
 	__u8  hd0_info[16];	/* obsolete! */		/* 0x080 */
 	__u8  hd1_info[16];	/* obsolete! */		/* 0x090 */
 	struct sys_desc_table sys_desc_table; /* obsolete! */	/* 0x0a0 */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 49e8ba1c0d34..6609e1012f2f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>
 #include <linux/cc_platform.h>
+#include <linux/elf.h>
 #include <linux/pgtable.h>
 
 #include <asm/asm.h>
@@ -588,3 +589,38 @@ void __head startup_64_setup_gdt_idt(void)
 
 	startup_64_load_idt(handler);
 }
+
+#ifdef CONFIG_RELOCATABLE
+void __head startup_64_apply_relocations(struct boot_params *bp)
+{
+	extern const Elf64_Rela __rela_start[], __rela_end[];
+	extern const u64 __relr_start[], __relr_end[];
+	u64 va_offset = (u64)RIP_REL_REF(_text) - __START_KERNEL;
+	u64 va_shift = bp->kaslr_va_shift;
+	u64 *place = NULL;
+
+	if (!va_shift)
+		return;
+
+	for (const Elf64_Rela *r = __rela_start; r < __rela_end; r++) {
+		if (ELF64_R_TYPE(r->r_info) != R_X86_64_RELATIVE)
+			continue;
+
+		place = (u64 *)(r->r_offset + va_offset);
+		*place += va_shift;
+	}
+
+	for (const u64 *rel = __relr_start; rel < __relr_end; rel++) {
+		if ((*rel & 1) == 0) {
+			place = (u64 *)(*rel + va_offset);
+			*place++ += va_shift;
+			continue;
+		}
+
+		for (u64 *p = place, r = *rel >> 1; r; p++, r >>= 1)
+			if (r & 1)
+				*p += va_shift;
+		place += 63;
+	}
+}
+#endif
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index cc2fec3de4b7..88cdc5a0c7a3 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -74,6 +74,11 @@ SYM_CODE_START_NOALIGN(startup_64)
 	cdq
 	wrmsr
 
+#ifdef CONFIG_RELOCATABLE
+	movq	%r15, %rdi
+	call	startup_64_apply_relocations
+#endif
+
 	call	startup_64_setup_gdt_idt
 
 	/* Now switch to __KERNEL_CS so IRET works reliably */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 52b8db931d0f..f7e832c2ac61 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -240,6 +240,18 @@ xen_elfnote_phys32_entry_offset =
 	:init
 #endif
 
+	.init.rela : {
+		__rela_start = .;
+		*(.rela.*) *(.rela_*)
+		__rela_end = .;
+	}
+
+	.init.relr : {
+		__relr_start = .;
+		*(.relr.*)
+		__relr_end = .;
+	}
+
 	/*
 	 * Section for code used exclusively before alternatives are run. All
 	 * references to such code must be patched out by alternatives, normally
@@ -469,12 +481,6 @@ xen_elfnote_phys32_entry_offset =
 		*(.got) *(.igot.*)
 	}
 	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
-#endif
-
-	.plt : {
-		*(.plt) *(.plt.*) *(.iplt)
-	}
-	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
 
 	.rel.dyn : {
 		*(.rel.*) *(.rel_*)
@@ -485,6 +491,12 @@ xen_elfnote_phys32_entry_offset =
 		*(.rela.*) *(.rela_*)
 	}
 	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+#endif
+
+	.plt : {
+		*(.plt) *(.plt.*) *(.iplt)
+	}
+	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
 }
 
 /*
-- 
2.46.0.792.g87dc391469-goog