From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx> parisc/cache: Start using the maple tree to iterate VMAs Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> --- arch/parisc/kernel/cache.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 39e02227e231..f6316aafba10 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -519,9 +519,13 @@ static inline unsigned long mm_total_size(struct mm_struct *mm) { struct vm_area_struct *vma; unsigned long usize = 0; + MA_STATE(mas, &mm->mm_mt, 0, 0); - for (vma = mm->mmap; vma; vma = vma->vm_next) + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) usize += vma->vm_end - vma->vm_start; + rcu_read_unlock(); + return usize; } @@ -547,6 +551,7 @@ void flush_cache_mm(struct mm_struct *mm) { struct vm_area_struct *vma; pgd_t *pgd; + MA_STATE(mas, &mm->mm_mt, 0, 0); /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. */ @@ -559,17 +564,20 @@ void flush_cache_mm(struct mm_struct *mm) } if (mm->context == mfsp(3)) { - for (vma = mm->mmap; vma; vma = vma->vm_next) { + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) { flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); if (vma->vm_flags & VM_EXEC) flush_user_icache_range_asm(vma->vm_start, vma->vm_end); flush_tlb_range(vma, vma->vm_start, vma->vm_end); } + rcu_read_unlock(); return; } pgd = mm->pgd; - for (vma = mm->mmap; vma; vma = vma->vm_next) { + rcu_read_lock(); + mas_for_each(&mas, vma, ULONG_MAX) { unsigned long addr; for (addr = vma->vm_start; addr < vma->vm_end; @@ -589,6 +597,7 @@ void flush_cache_mm(struct mm_struct *mm) } } } + rcu_read_unlock(); } void flush_cache_range(struct vm_area_struct *vma, -- 2.30.2