Let's synchronize all accesses to the 1:1 and vmemmap mappings. This
will be especially relevant when wanting to clean up empty page tables
that could be shared by both. Avoid races when removing tables that
might be just about to get reused.

Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Gerald Schaefer <gerald.schaefer@xxxxxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 arch/s390/mm/vmem.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bcddabd509da8..aa968f67d7f9f 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -293,6 +293,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	pte_t *pt_dir;
 	int ret = -ENOMEM;
 
+	mutex_lock(&vmem_mutex);
 	pgt_prot = pgprot_val(PAGE_KERNEL);
 	sgt_prot = pgprot_val(SEGMENT_KERNEL);
 	if (!MACHINE_HAS_NX) {
@@ -364,6 +365,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	}
 	ret = 0;
 out:
+	mutex_unlock(&vmem_mutex);
 	if (ret)
 		vmemmap_free(start, end, altmap);
 	return ret;
@@ -372,7 +374,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 void vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
+	mutex_lock(&vmem_mutex);
 	remove_pagetable(start, end, false);
+	mutex_unlock(&vmem_mutex);
 }
 
 void vmem_remove_mapping(unsigned long start, unsigned long size)
-- 
2.26.2
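
For context (not part of the patch): a minimal sketch of the serialization
this establishes, assuming the existing vmem_mutex in arch/s390/mm/vmem.c and
assuming the 1:1 mapping paths such as vmem_remove_mapping() already take that
mutex; function bodies are simplified for illustration only.

	/*
	 * Sketch only, not the actual vmem.c code: both the vmemmap paths and
	 * the (assumed, pre-existing) 1:1 mapping paths serialize page-table
	 * changes on the same mutex, so a page table one path is about to
	 * free cannot be concurrently reused by the other.
	 */
	static DEFINE_MUTEX(vmem_mutex);

	/* 1:1 mapping side (assumed to already hold vmem_mutex). */
	void vmem_remove_mapping(unsigned long start, unsigned long size)
	{
		mutex_lock(&vmem_mutex);
		/* ... unmap the 1:1 range, possibly freeing page tables ... */
		mutex_unlock(&vmem_mutex);
	}

	/* vmemmap side (now also under vmem_mutex with this patch). */
	void vmemmap_free(unsigned long start, unsigned long end,
			  struct vmem_altmap *altmap)
	{
		mutex_lock(&vmem_mutex);
		remove_pagetable(start, end, false);
		mutex_unlock(&vmem_mutex);
	}

Note that, per the hunks above, vmemmap_populate() drops the mutex at the out:
label before its error path calls vmemmap_free(), which takes the mutex again,
so the error path cannot self-deadlock.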