Dynamic memory layout is used by KASLR and 5-level paging.

CONFIG_X86_5LEVEL is going to be removed, making 5-level paging support
unconditional, which requires unconditional support of the dynamic memory
layout.

Remove CONFIG_DYNAMIC_MEMORY_LAYOUT.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/Kconfig                        | 8 --------
 arch/x86/include/asm/page_64_types.h    | 4 ----
 arch/x86/include/asm/pgtable_64_types.h | 6 ------
 arch/x86/kernel/head64.c                | 2 --
 scripts/gdb/linux/pgtable.py            | 4 +---
 5 files changed, 1 insertion(+), 23 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e30ea4129d2c..827928680ed6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1501,7 +1501,6 @@ config X86_PAE
 config X86_5LEVEL
 	bool "Enable 5-level page tables support"
 	default y
-	select DYNAMIC_MEMORY_LAYOUT
 	select SPARSEMEM_VMEMMAP
 	depends on X86_64
 	help
@@ -2237,17 +2236,10 @@ config PHYSICAL_ALIGN
 
 	  Don't change this unless you know what you are doing.
 
-config DYNAMIC_MEMORY_LAYOUT
-	bool
-	help
-	  This option makes base addresses of vmalloc and vmemmap as well as
-	  __PAGE_OFFSET movable during boot.
-
 config RANDOMIZE_MEMORY
 	bool "Randomize the kernel memory sections"
 	depends on X86_64
 	depends on RANDOMIZE_BASE
-	select DYNAMIC_MEMORY_LAYOUT
 	default RANDOMIZE_BASE
 	help
 	  Randomizes the base virtual address of kernel memory sections
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 06ef25411d62..c2f3c50a2787 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -41,11 +41,7 @@
 #define __PAGE_OFFSET_BASE_L5	_AC(0xff11000000000000, UL)
 #define __PAGE_OFFSET_BASE_L4	_AC(0xffff888000000000, UL)
 
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET		page_offset_base
-#else
-#define __PAGE_OFFSET		__PAGE_OFFSET_BASE_L4
-#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 9053dfe9fa03..09df8939b997 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -130,15 +130,9 @@ extern unsigned int ptrs_per_p4d;
 #define __VMEMMAP_BASE_L4	0xffffea0000000000UL
 #define __VMEMMAP_BASE_L5	0xffd4000000000000UL
 
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 # define VMALLOC_START		vmalloc_base
 # define VMALLOC_SIZE_TB	(pgtable_l5_enabled() ? VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4)
 # define VMEMMAP_START		vmemmap_base
-#else
-# define VMALLOC_START		__VMALLOC_BASE_L4
-# define VMALLOC_SIZE_TB	VMALLOC_SIZE_TB_L4
-# define VMEMMAP_START		__VMEMMAP_BASE_L4
-#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
 
 /*
  * End of the region for which vmalloc page tables are pre-allocated.
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a817ed0724d1..ec36ad7117ae 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -60,14 +60,12 @@ unsigned int ptrs_per_p4d __ro_after_init = 1;
 EXPORT_SYMBOL(ptrs_per_p4d);
 #endif
 
-#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
 EXPORT_SYMBOL(page_offset_base);
 unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
 EXPORT_SYMBOL(vmalloc_base);
 unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
 EXPORT_SYMBOL(vmemmap_base);
-#endif
 
 static inline bool check_la57_support(void)
 {
diff --git a/scripts/gdb/linux/pgtable.py b/scripts/gdb/linux/pgtable.py
index 30d837f3dfae..09aac2421fb8 100644
--- a/scripts/gdb/linux/pgtable.py
+++ b/scripts/gdb/linux/pgtable.py
@@ -29,11 +29,9 @@ def page_mask(level=1):
         raise Exception(f'Unknown page level: {level}')
 
 
-#page_offset_base in case CONFIG_DYNAMIC_MEMORY_LAYOUT is disabled
-POB_NO_DYNAMIC_MEM_LAYOUT = '0xffff888000000000'
 def _page_offset_base():
     pob_symbol = gdb.lookup_global_symbol('page_offset_base')
-    pob = pob_symbol.name if pob_symbol else POB_NO_DYNAMIC_MEM_LAYOUT
+    pob = pob_symbol.name
     return gdb.parse_and_eval(pob)
 
 
-- 
2.43.0
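
Not part of the patch: a minimal userspace sketch of the pattern the change
makes unconditional. With the #else branch gone, __PAGE_OFFSET always resolves
to the page_offset_base variable, pre-initialised to the 4-level base and
rewritten early in boot once the paging mode (or the KASLR offset) is known.
The addresses come from the hunks above; pgtable_l5_enabled() is stubbed out
and main() merely stands in for early boot.

#include <stdio.h>

#define __PAGE_OFFSET_BASE_L5	0xff11000000000000UL
#define __PAGE_OFFSET_BASE_L4	0xffff888000000000UL

/* Always a variable now; the static __PAGE_OFFSET_BASE_L4 fallback is gone. */
static unsigned long page_offset_base = __PAGE_OFFSET_BASE_L4;
#define __PAGE_OFFSET	page_offset_base

static int pgtable_l5_enabled(void)
{
	return 1;	/* pretend the CPU booted with LA57 enabled */
}

int main(void)
{
	printf("compile-time default: %#lx\n", __PAGE_OFFSET);

	/* roughly what early boot does once the paging mode is known */
	if (pgtable_l5_enabled())
		page_offset_base = __PAGE_OFFSET_BASE_L5;

	printf("runtime base:         %#lx\n", __PAGE_OFFSET);
	return 0;
}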