This patch applies on top of the parisc for-next branch. Helge Deller
identified a conflict between the maple tree next branch and the parisc
next branch: the maple tree patch "parisc: Remove mmap linked list from
cache handling" conflicts with the parisc patch "parisc: Add vDSO
support". This patch removes the VMA linked list usage that Helge
identified. I do not expect it to apply cleanly after the maple tree
next merge, but it should make the intended resolution clear. This is
an untested conflict resolution for linux-next.

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
 arch/parisc/kernel/cache.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index e7b8e74dad8e..05b4498a1244 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -515,12 +515,12 @@ void flush_cache_all(void)
 	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
 }
 
-static inline unsigned long mm_total_size(struct mm_struct *mm)
+static inline unsigned long mm_total_size(struct vma_iterator *vmi)
 {
 	struct vm_area_struct *vma;
 	unsigned long usize = 0;
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for_each_vma(*vmi, vma)
 		usize += vma->vm_end - vma->vm_start;
 	return usize;
 }
@@ -570,11 +570,12 @@ static void flush_user_cache_tlb(struct vm_area_struct *vma,
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
 
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
-	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+	    mm_total_size(&vmi) >= parisc_cache_flush_threshold) {
 		if (mm->context.space_id)
 			flush_tlb_all();
 		flush_cache_all();
@@ -583,13 +584,13 @@ void flush_cache_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if (mm->context.space_id == mfsp(3)) {
-		for (vma = mm->mmap; vma; vma = vma->vm_next)
+		for_each_vma(vmi, vma)
 			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
 		preempt_enable();
 		return;
 	}
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next)
+	for_each_vma(vmi, vma)
 		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }
-- 
2.34.1
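
P.S. For anyone resolving similar conflicts elsewhere in the tree, the
conversion pattern is the same everywhere: the old linked-list walk over
mm->mmap / vm_next becomes a maple tree VMA iterator walk. A minimal
sketch of the before/after shape, for illustration only and not part of
the patch (the function names example_vma_span/example_vma_span_mt are
made up; VMA_ITERATOR and for_each_vma are the real iterator API from
the maple tree series):

#include <linux/mm.h>		/* VMA_ITERATOR, for_each_vma */
#include <linux/mm_types.h>	/* struct mm_struct, struct vm_area_struct */

/* Old pattern: walk the VMA linked list. Only compiles before the
 * maple tree conversion removes mm->mmap and vm_next. */
static unsigned long example_vma_span(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

/* New pattern: declare an iterator positioned at address 0, then let
 * for_each_vma() walk the maple tree in address order. */
static unsigned long example_vma_span_mt(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

One thing worth keeping in mind when sharing an iterator between
helpers, as the patch above does: for_each_vma() advances the iterator
as it goes, so a vma_iterator that has been walked to the end is
exhausted and has to be re-initialised before a second walk.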