The bottom-up style is no longer useful on x86_64, so isolate it.
Later, it may be removed from x86 completely.

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: "Rafael J. Wysocki" <rjw@xxxxxxxxxxxxx>
Cc: Len Brown <lenb@xxxxxxxxxx>
Cc: Yinghai Lu <yinghai@xxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Chao Fan <fanc.fnst@xxxxxxxxxxxxxx>
Cc: Baoquan He <bhe@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: linux-acpi@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
---
 arch/x86/mm/init.c        | 153 +---------------------------------------------
 arch/x86/mm/init_32.c     | 147 ++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/mm/mm_internal.h |   8 ++-
 3 files changed, 155 insertions(+), 153 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 003ad77..6a853e4 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -502,7 +502,7 @@ unsigned long __ref init_memory_mapping(unsigned long start,
  * That range would have hole in the middle or ends, and only ram parts
  * will be mapped in init_range_memory_mapping().
  */
-static unsigned long __init init_range_memory_mapping(
+unsigned long __init init_range_memory_mapping(
 					   unsigned long r_start,
 					   unsigned long r_end)
 {
@@ -530,157 +530,6 @@ static unsigned long __init init_range_memory_mapping(
 	return mapped_ram_size;
 }
 
-#ifdef CONFIG_X86_32
-
-static unsigned long min_pfn_mapped;
-
-static unsigned long __init get_new_step_size(unsigned long step_size)
-{
-	/*
-	 * Initial mapped size is PMD_SIZE (2M).
-	 * We can not set step_size to be PUD_SIZE (1G) yet.
-	 * In worse case, when we cross the 1G boundary, and
-	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-	 * to map 1G range with PTE. Hence we use one less than the
-	 * difference of page table level shifts.
-	 *
-	 * Don't need to worry about overflow in the top-down case, on 32bit,
-	 * when step_size is 0, round_down() returns 0 for start, and that
-	 * turns it into 0x100000000ULL.
-	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
-	 * needs to be taken into consideration by the code below.
-	 */
-	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
-}
-
-/**
- * memory_map_top_down - Map [map_start, map_end) top down
- * @map_start: start address of the target memory range
- * @map_end: end address of the target memory range
- *
- * This function will setup direct mapping for memory range
- * [map_start, map_end) in top-down. That said, the page tables
- * will be allocated at the end of the memory, and we map the
- * memory in top-down.
- */
-static void __init memory_map_top_down(unsigned long map_start,
-				       unsigned long map_end)
-{
-	unsigned long real_end, start, last_start;
-	unsigned long step_size;
-	unsigned long addr;
-	unsigned long mapped_ram_size = 0;
-
-	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
-	real_end = addr + PMD_SIZE;
-
-	/* step_size need to be small so pgt_buf from BRK could cover it */
-	step_size = PMD_SIZE;
-	max_pfn_mapped = 0; /* will get exact value next */
-	min_pfn_mapped = real_end >> PAGE_SHIFT;
-	last_start = start = real_end;
-
-	/*
-	 * We start from the top (end of memory) and go to the bottom.
-	 * The memblock_find_in_range() gets us a block of RAM from the
-	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
-	 * for page table.
-	 */
-	while (last_start > map_start) {
-		if (last_start > step_size) {
-			start = round_down(last_start - 1, step_size);
-			if (start < map_start)
-				start = map_start;
-		} else
-			start = map_start;
-		mapped_ram_size += init_range_memory_mapping(start,
-							last_start);
-		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
-		last_start = start;
-		min_pfn_mapped = last_start >> PAGE_SHIFT;
-		if (mapped_ram_size >= step_size)
-			step_size = get_new_step_size(step_size);
-	}
-
-	if (real_end < map_end) {
-		init_range_memory_mapping(real_end, map_end);
-		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
-	}
-}
-
-/**
- * memory_map_bottom_up - Map [map_start, map_end) bottom up
- * @map_start: start address of the target memory range
- * @map_end: end address of the target memory range
- *
- * This function will setup direct mapping for memory range
- * [map_start, map_end) in bottom-up. Since we have limited the
- * bottom-up allocation above the kernel, the page tables will
- * be allocated just above the kernel and we map the memory
- * in [map_start, map_end) in bottom-up.
- */
-static void __init memory_map_bottom_up(unsigned long map_start,
-					unsigned long map_end)
-{
-	unsigned long next, start;
-	unsigned long mapped_ram_size = 0;
-	/* step_size need to be small so pgt_buf from BRK could cover it */
-	unsigned long step_size = PMD_SIZE;
-
-	start = map_start;
-	min_pfn_mapped = start >> PAGE_SHIFT;
-
-	/*
-	 * We start from the bottom (@map_start) and go to the top (@map_end).
-	 * The memblock_find_in_range() gets us a block of RAM from the
-	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
-	 * for page table.
-	 */
-	while (start < map_end) {
-		if (step_size && map_end - start > step_size) {
-			next = round_up(start + 1, step_size);
-			if (next > map_end)
-				next = map_end;
-		} else {
-			next = map_end;
-		}
-
-		mapped_ram_size += init_range_memory_mapping(start, next);
-		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
-		start = next;
-
-		if (mapped_ram_size >= step_size)
-			step_size = get_new_step_size(step_size);
-	}
-}
-
-static unsigned long __init init_range_memory_mapping32(
-					unsigned long r_start, unsigned long r_end)
-{
-	/*
-	 * If the allocation is in bottom-up direction, we setup direct mapping
-	 * in bottom-up, otherwise we setup direct mapping in top-down.
-	 */
-	if (memblock_bottom_up()) {
-		unsigned long kernel_end = __pa_symbol(_end);
-
-		/*
-		 * we need two separate calls here. This is because we want to
-		 * allocate page tables above the kernel. So we first map
-		 * [kernel_end, end) to make memory above the kernel be mapped
-		 * as soon as possible. And then use page tables allocated above
-		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
-		 */
-		memory_map_bottom_up(kernel_end, r_end);
-		memory_map_bottom_up(r_start, kernel_end);
-	} else {
-		memory_map_top_down(r_start, r_end);
-	}
-}
-
-#endif
-
 void __init init_mem_mapping(void)
 {
 	unsigned long end;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 49ecf5e..f802678 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -550,6 +550,153 @@ void __init early_ioremap_page_table_range_init(void)
 	early_ioremap_reset();
 }
 
+static unsigned long min_pfn_mapped;
+
+static unsigned long __init get_new_step_size(unsigned long step_size)
+{
+	/*
+	 * Initial mapped size is PMD_SIZE (2M).
+	 * We can not set step_size to be PUD_SIZE (1G) yet.
+	 * In worse case, when we cross the 1G boundary, and
+	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
+	 * to map 1G range with PTE. Hence we use one less than the
+	 * difference of page table level shifts.
+	 *
+	 * Don't need to worry about overflow in the top-down case, on 32bit,
+	 * when step_size is 0, round_down() returns 0 for start, and that
+	 * turns it into 0x100000000ULL.
+	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+	 * needs to be taken into consideration by the code below.
+	 */
+	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
+}
+
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in top-down. That said, the page tables
+ * will be allocated at the end of the memory, and we map the
+ * memory in top-down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
+{
+	unsigned long real_end, start, last_start;
+	unsigned long step_size;
+	unsigned long addr;
+	unsigned long mapped_ram_size = 0;
+
+	/* xen has big range in reserved near end of ram, skip it at first.*/
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
+	real_end = addr + PMD_SIZE;
+
+	/* step_size need to be small so pgt_buf from BRK could cover it */
+	step_size = PMD_SIZE;
+	max_pfn_mapped = 0; /* will get exact value next */
+	min_pfn_mapped = real_end >> PAGE_SHIFT;
+	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
+	while (last_start > map_start) {
+		if (last_start > step_size) {
+			start = round_down(last_start - 1, step_size);
+			if (start < map_start)
+				start = map_start;
+		} else
+			start = map_start;
+		mapped_ram_size += init_range_memory_mapping(start,
+							last_start);
+		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
+		last_start = start;
+		min_pfn_mapped = last_start >> PAGE_SHIFT;
+		if (mapped_ram_size >= step_size)
+			step_size = get_new_step_size(step_size);
+	}
+
+	if (real_end < map_end) {
+		init_range_memory_mapping(real_end, map_end);
+		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
+	}
+}
+
+/**
+ * memory_map_bottom_up - Map [map_start, map_end) bottom up
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in bottom-up. Since we have limited the
+ * bottom-up allocation above the kernel, the page tables will
+ * be allocated just above the kernel and we map the memory
+ * in [map_start, map_end) in bottom-up.
+ */
+static void __init memory_map_bottom_up(unsigned long map_start,
+					unsigned long map_end)
+{
+	unsigned long next, start;
+	unsigned long mapped_ram_size = 0;
+	/* step_size need to be small so pgt_buf from BRK could cover it */
+	unsigned long step_size = PMD_SIZE;
+
+	start = map_start;
+	min_pfn_mapped = start >> PAGE_SHIFT;
+
+	/*
+	 * We start from the bottom (@map_start) and go to the top (@map_end).
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
+	while (start < map_end) {
+		if (step_size && map_end - start > step_size) {
+			next = round_up(start + 1, step_size);
+			if (next > map_end)
+				next = map_end;
+		} else {
+			next = map_end;
+		}
+
+		mapped_ram_size += init_range_memory_mapping(start, next);
+		set_alloc_range(min_pfn_mapped, max_pfn_mapped);
+		start = next;
+
+		if (mapped_ram_size >= step_size)
+			step_size = get_new_step_size(step_size);
+	}
+}
+
+void __init init_range_memory_mapping32(
+					unsigned long r_start, unsigned long r_end)
+{
+	/*
+	 * If the allocation is in bottom-up direction, we setup direct mapping
+	 * in bottom-up, otherwise we setup direct mapping in top-down.
+	 */
+	if (memblock_bottom_up()) {
+		unsigned long kernel_end = __pa_symbol(_end);
+
+		/*
+		 * we need two separate calls here. This is because we want to
+		 * allocate page tables above the kernel. So we first map
+		 * [kernel_end, end) to make memory above the kernel be mapped
+		 * as soon as possible. And then use page tables allocated above
+		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
+		 */
+		memory_map_bottom_up(kernel_end, r_end);
+		memory_map_bottom_up(r_start, kernel_end);
+	} else {
+		memory_map_top_down(r_start, r_end);
+	}
+}
+
 static void __init pagetable_init(void)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 4e1f6e1..5ab133c 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -9,7 +9,13 @@ static inline void *alloc_low_page(void)
 }
 
 void early_ioremap_page_table_range_init(void);
-
+void init_range_memory_mapping32(
+					unsigned long r_start,
+					unsigned long r_end);
+void set_alloc_range(unsigned long low, unsigned long high);
+unsigned long __init init_range_memory_mapping(
+					unsigned long r_start,
+					unsigned long r_end);
 unsigned long kernel_physical_mapping_init(unsigned long start,
 					   unsigned long end,
 					   unsigned long page_size_mask);
-- 
2.7.4
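
P.S. For anyone not familiar with the windowing scheme shared by memory_map_top_down()
and memory_map_bottom_up(): below is a minimal, standalone userspace sketch (not part
of the patch, and not kernel code) that only simulates the top-down loop so the
step_size growth is visible. The constants assume the PAE layout (PMD_SHIFT = 21,
i.e. PMD_SIZE = 2M), and the memblock/page-table work is replaced by a printf; the
map_start/map_end values are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* PAE: 2M large pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)

/* Same growth rule as the kernel helper of the same name. */
static unsigned long get_new_step_size(unsigned long step_size)
{
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/* round_down() for power-of-two alignments, as in the kernel. */
static unsigned long round_down_pow2(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

int main(void)
{
	unsigned long map_start = 0x1000000UL;	/* 16M, illustrative */
	unsigned long map_end = 0x40000000UL;	/* 1G, illustrative  */
	unsigned long last_start = map_end;	/* stands in for real_end */
	unsigned long step_size = PMD_SIZE;
	unsigned long mapped_ram_size = 0;
	unsigned long start;

	while (last_start > map_start) {
		/* Take at most one step_size-aligned chunk off the top. */
		if (last_start > step_size) {
			start = round_down_pow2(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else {
			start = map_start;
		}
		printf("map [%#011lx, %#011lx) with step_size %#lx\n",
		       start, last_start, step_size);
		mapped_ram_size += last_start - start;
		last_start = start;
		/* Grow the step once enough is mapped to back larger steps. */
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
	return 0;
}

Running it shows the 16M-1G range being consumed in three chunks (2M, then ~510M,
then the rest), which is the point of the scheme: only the small initial step has
to be covered by the BRK-backed pgt_buf, after which already-mapped memory can
supply page tables for much larger steps.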