The attached change interchanges the order of purging the TLB and setting the corresponding page table entry. TLB purges are strongly ordered. It occurred to me one night that setting the PTE first might have subtle ordering issues on SMP machines and cause random memory corruption.

A TLB lock guards the insertion of user TLB entries. So after the TLB is purged, a new entry can't be inserted until the lock is released. This ensures that the new PTE value is used when the lock is released.

Since making this change, no random segmentation faults have been observed on the Debian hppa buildd servers.

Signed-off-by: John David Anglin <dave.anglin@xxxxxxxx>
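To make the ordering concrete, here is a minimal userspace sketch of the pattern, not the parisc code: a pthread mutex stands in for pa_tlb_lock, and the names fake_pte, fake_tlb_entry, tlb_miss_handler and update_pte are hypothetical, invented for illustration only.

/* Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tlb_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for pa_tlb_lock */
static unsigned long fake_pte;       /* stands in for *ptep */
static unsigned long fake_tlb_entry; /* stands in for a cached translation */

/* Models the software TLB miss handler: it inserts a translation only
   while holding the lock, so it reads the PTE after any prior unlock. */
static void *tlb_miss_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&tlb_lock);
	fake_tlb_entry = fake_pte;
	pthread_mutex_unlock(&tlb_lock);
	return NULL;
}

/* Models set_pte_at() after this change: purge first, set second. */
static void update_pte(unsigned long newval)
{
	pthread_mutex_lock(&tlb_lock);
	fake_tlb_entry = 0;  /* purge_tlb_entries(): drop the old translation */
	fake_pte = newval;   /* set_pte(): store the new value afterwards */
	pthread_mutex_unlock(&tlb_lock);  /* release publishes the new PTE */
}

int main(void)
{
	pthread_t t;

	fake_pte = 0x1000;
	fake_tlb_entry = fake_pte;  /* translation already cached */

	update_pte(0x2000);
	pthread_create(&t, NULL, tlb_miss_handler, NULL);
	pthread_join(t, NULL);

	/* Any insertion after the purge is computed from the new PTE,
	   so a stale 0x1000 can never reappear in the "TLB". */
	printf("tlb entry = %#lx\n", fake_tlb_entry);
	return 0;
}

The invariant the new order buys is at the unlock: once the purge has run under the lock, no translation can be inserted until the lock drops, and by then the new PTE value is already in place, so every later insertion is computed from it.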
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c2c43f7..3a4ed9f 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
-		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
 
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
--
John David Anglin	dave.anglin@xxxxxxxx