This basically restores a slightly modified version of the original
sync_global_pgds() which we had before the folded p4d was introduced.

The only modification is protection against 'address' overflow.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/mm/init_64.c | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a242139df8fe..0b62b13e8655 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -92,6 +92,40 @@ __setup("noexec32=", nonx32_setup);
  * When memory was added make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
+#ifdef CONFIG_X86_5LEVEL
+void sync_global_pgds(unsigned long start, unsigned long end)
+{
+	unsigned long address;
+
+	for (address = start; address <= end && address >= start; address += PGDIR_SIZE) {
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		struct page *page;
+
+		if (pgd_none(*pgd_ref))
+			continue;
+
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			spinlock_t *pgt_lock;
+
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			/* the pgt_lock only for Xen */
+			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+			spin_lock(pgt_lock);
+
+			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+
+			spin_unlock(pgt_lock);
+		}
+		spin_unlock(&pgd_lock);
+	}
+}
+#else
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
 	unsigned long address;
@@ -135,6 +169,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		spin_unlock(&pgd_lock);
 	}
 }
+#endif
 
 /*
  * NOTE: This function is marked __ref because it calls __init function
-- 
2.11.0
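
A note on the overflow protection mentioned above: the extra "address >= start"
term in the loop condition stops the walk once "address += PGDIR_SIZE" wraps
around the top of the address space; with only "address <= end" the loop would
never terminate when 'end' is close to ULONG_MAX. Below is a minimal standalone
sketch of the same guard. It is not part of the patch: STEP is a hypothetical
stand-in for PGDIR_SIZE and the values assume a 64-bit unsigned long.

#include <stdio.h>
#include <limits.h>

#define STEP (1UL << 39)	/* stand-in for PGDIR_SIZE; assumes 64-bit */

int main(void)
{
	unsigned long start = ULONG_MAX - 2 * STEP;
	unsigned long end = ULONG_MAX;
	unsigned long address;
	int steps = 0;

	/* Same loop shape as the 5-level sync_global_pgds() above. */
	for (address = start; address <= end && address >= start; address += STEP)
		steps++;

	/*
	 * The final increment wraps 'address' below 'start', so the
	 * "address >= start" test fails and the loop exits. Without it,
	 * "address <= end" would still hold after the wrap and the loop
	 * would never terminate.
	 */
	printf("visited %d PGD-sized steps at the top of the address space\n",
	       steps);
	return 0;
}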