Add an option so the module section is placed just after the mapped
kernel. This ensures position-independent modules are always at the
right distance from the kernel and do not require -mcmodel=large. It
also optimizes the size available for modules by getting rid of the
empty space in the kernel randomization range.

Signed-off-by: Thomas Garnier <thgarnie@xxxxxxxxxx>
---
 Documentation/x86/x86_64/mm.txt         | 3 +++
 arch/x86/Kconfig                        | 4 ++++
 arch/x86/include/asm/pgtable_64_types.h | 6 +++++-
 arch/x86/kernel/head64.c                | 5 ++++-
 arch/x86/mm/dump_pagetables.c           | 4 ++--
 5 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index b0798e281aa6..b51d66386e32 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -73,4 +73,7 @@ Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
 
+If CONFIG_DYNAMIC_MODULE_BASE is enabled, the module section follows the end of
+the mapped kernel.
+
 -Andi Kleen, Jul 2004
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 777197fab6dd..1e4b399c64e5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2133,6 +2133,10 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 
 	  If unsure, leave at the default value.
 
+# Module section starts just after the end of the mapped kernel
+config DYNAMIC_MODULE_BASE
+	bool
+
 config X86_GLOBAL_STACKPROTECTOR
 	bool
 	depends on CC_STACKPROTECTOR
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 06470da156ba..e00fc429b898 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -6,6 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <asm/kaslr.h>
+#include <asm/sections.h>
 
 /*
  * These are used to make use of C type-checking..
@@ -18,7 +19,6 @@ typedef unsigned long	pgdval_t;
 typedef unsigned long	pgprotval_t;
 
 typedef struct { pteval_t pte; } pte_t;
-
 #endif	/* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD	0
@@ -93,7 +93,11 @@ typedef struct { pteval_t pte; } pte_t;
 #define VMEMMAP_START	__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
 #define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
+#ifdef CONFIG_DYNAMIC_MODULE_BASE
+#define MODULES_VADDR	ALIGN(((unsigned long)_end + PAGE_SIZE), PMD_SIZE)
+#else
 #define MODULES_VADDR	(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+#endif
 /* The module sections ends with the start of the fixmap */
 #define MODULES_END	__fix_to_virt(__end_of_fixed_addresses + 1)
 #define MODULES_LEN	(MODULES_END - MODULES_VADDR)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 675f1dba3b21..b6363f0d11a7 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -321,12 +321,15 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 	 * Build-time sanity checks on the kernel image and module
 	 * area mappings. (these are purely build-time and produce no code)
 	 */
+#ifndef CONFIG_DYNAMIC_MODULE_BASE
 	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
 	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
-	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_RANDOMIZE_BASE_LARGE) &&
+		     MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
 	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
 	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
 	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+#endif
 	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
 				(__START_KERNEL & PGDIR_MASK)));
 	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 8691a57da63e..8565b2b45848 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -95,7 +95,7 @@ static struct addr_marker address_markers[] = {
 	{ EFI_VA_END,		"EFI Runtime Services" },
 # endif
 	{ __START_KERNEL_map,   "High Kernel Mapping" },
-	{ MODULES_VADDR,        "Modules" },
+	{ 0/* MODULES_VADDR */, "Modules" },
 	{ MODULES_END,          "End Modules" },
 #else
 	{ PAGE_OFFSET,          "Kernel Mapping" },
@@ -529,7 +529,7 @@ static int __init pt_dump_init(void)
 # endif
 	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif
-
+	address_markers[MODULES_VADDR_NR].start_address = MODULES_VADDR;
 	return 0;
 }
 __initcall(pt_dump_init);
-- 
2.14.2.920.gcf0c67979c-goog
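
For reviewers, a minimal stand-alone sketch of the placement computation
introduced above. It mirrors the new CONFIG_DYNAMIC_MODULE_BASE definition of
MODULES_VADDR, ALIGN((unsigned long)_end + PAGE_SIZE, PMD_SIZE), using
hypothetical stand-in values for _end, PAGE_SIZE and PMD_SIZE to show that the
module area starts at the first PMD-aligned (2MB) address at least one page
past the end of the mapped kernel image; it is an illustration, not part of the
patch.

/*
 * Illustration only: user-space model of the CONFIG_DYNAMIC_MODULE_BASE
 * placement. The values for _end, PAGE_SIZE and PMD_SIZE are hypothetical
 * stand-ins for the kernel's own constants.
 */
#include <stdio.h>

#define PAGE_SIZE	0x1000UL	/* 4KB page */
#define PMD_SIZE	0x200000UL	/* 2MB, one PMD-mapped region */
/* Round x up to the next multiple of the power-of-two a. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical end of the mapped kernel image (_end). */
	unsigned long kernel_end = 0xffffffff82345678UL;
	/* Same expression as the new MODULES_VADDR definition. */
	unsigned long modules_vaddr = ALIGN(kernel_end + PAGE_SIZE, PMD_SIZE);

	printf("_end          = 0x%lx\n", kernel_end);
	printf("MODULES_VADDR = 0x%lx\n", modules_vaddr);	/* 0xffffffff82400000 */
	return 0;
}

With the regular layout, MODULES_VADDR sits at the fixed address
__START_KERNEL_map + KERNEL_IMAGE_SIZE, so the gap between the randomized
kernel and that boundary is wasted; anchoring the module area at _end reclaims
that gap for modules, which is the size optimization mentioned in the commit
message.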