The majority of random segmentation faults that I have looked at appear
to be memory corruption in memory allocated using mmap and malloc. This
got me thinking that there might be issues with the parisc
implementation of flush_anon_page.

On PA8800/PA8900 CPUs, we use flush_user_cache_page to flush anonymous
pages. I modified flush_user_cache_page to leave interrupts disabled
for the entire flush, just to be sure the context doesn't get modified
mid-flush.

Looking at the implementation of flush_anon_page on other
architectures, I noticed that they all invalidate the kernel mapping
as well as flushing the user page. I added code to invalidate the
kernel mapping to this page in the PA8800/PA8900 path. It's possible
this is also needed for other processors, but I don't have a way to
test that.

I removed the use of flush_data_cache when the mapping is shared. In
theory, shared mappings are all equivalent, so flush_user_cache_page
should flush all shared mappings. It is much faster.

Lightly tested on rp3440 and c8000.

Signed-off-by: John David Anglin <dave.anglin@xxxxxxxx>
---

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ca4a302d4365..8d14a8a5d4d6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -333,8 +333,6 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 
 	vmaddr &= PAGE_MASK;
 
-	preempt_disable();
-
 	/* Set context for flush */
 	local_irq_save(flags);
 	prot = mfctl(8);
@@ -344,7 +342,6 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 	pgd_lock = mfctl(28);
 #endif
 	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
-	local_irq_restore(flags);
 
 	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
 	if (vma->vm_flags & VM_EXEC)
@@ -352,7 +349,6 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 	flush_tlb_page(vma, vmaddr);
 
 	/* Restore previous context */
-	local_irq_save(flags);
 #ifdef CONFIG_TLB_PTLOCK
 	mtctl(pgd_lock, 28);
 #endif
@@ -360,8 +356,6 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
 	mtsp(space, SR_USER);
 	mtctl(prot, 8);
 	local_irq_restore(flags);
-
-	preempt_enable();
 }
 
 static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
@@ -543,7 +537,7 @@ void __init parisc_setup_cache_timing(void)
 			parisc_tlb_flush_threshold/1024);
 }
 
-extern void purge_kernel_dcache_page_asm(unsigned long);
+extern void purge_kernel_dcache_page_asm(const void *addr);
 extern void clear_user_page_asm(void *, unsigned long);
 extern void copy_user_page_asm(void *, void *, unsigned long);
 
@@ -558,6 +552,16 @@ void flush_kernel_dcache_page_addr(const void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
+static void purge_kernel_dcache_page_addr(const void *addr)
+{
+	unsigned long flags;
+
+	purge_kernel_dcache_page_asm(addr);
+	purge_tlb_start(flags);
+	pdtlb(SR_KERNEL, addr);
+	purge_tlb_end(flags);
+}
+
 static void flush_cache_page_if_present(struct vm_area_struct *vma,
 	unsigned long vmaddr, unsigned long pfn)
 {
@@ -725,10 +729,8 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
 		return;
 
 	if (parisc_requires_coherency()) {
-		if (vma->vm_flags & VM_SHARED)
-			flush_data_cache();
-		else
-			flush_user_cache_page(vma, vmaddr);
+		flush_user_cache_page(vma, vmaddr);
+		purge_kernel_dcache_page_addr(page_address(page));
 		return;
 	}
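
For clarity, here is a condensed sketch of the resulting
flush_user_cache_page flow after the hunks above are applied (not the
literal code; the context save/restore is abbreviated). The point is
that the whole sequence now runs in a single irq-disabled region:

	local_irq_save(flags);
	/* save prot/space/pgd (and pgd_lock) and switch to vma->vm_mm */
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* restore the previous context registers */
	local_irq_restore(flags);

With interrupts off for the entire flush, the task can't be preempted
on this CPU anyway, so the preempt_disable/preempt_enable pair becomes
redundant and is dropped.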