This patch enables the speculative page fault handler on the PowerPC
architecture.

The fault handler first tries a speculative page fault without holding
the mmap_sem; if that attempt returns VM_FAULT_RETRY, the mmap_sem is
acquired and the traditional page fault processing is done.

The speculative path is only tried for multithreaded processes, as
there is no risk of contention on the mmap_sem otherwise.

This code is only built when CONFIG_SPECULATIVE_PAGE_FAULT is defined
(currently for BOOK3S_64 && SMP).

Signed-off-by: Laurent Dufour <ldufour@xxxxxxxxxxxxxxxxxx>
---
 arch/powerpc/mm/fault.c | 31 ++++++++++++++++++++++++++++++-
 1 file changed, 30 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 866446cf2d9a..104f3cc86b51 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -392,6 +392,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 			   unsigned long error_code)
 {
 	struct vm_area_struct * vma;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct *spf_vma = NULL;
+#endif
 	struct mm_struct *mm = current->mm;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	int is_exec = TRAP(regs) == 0x400;
@@ -459,6 +462,20 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (is_exec)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	if (is_user && (atomic_read(&mm->mm_users) > 1)) {
+		/* let's try a speculative page fault without grabbing the
+		 * mmap_sem.
+		 */
+		fault = handle_speculative_fault(mm, address, flags, &spf_vma);
+		if (!(fault & VM_FAULT_RETRY)) {
+			perf_sw_event(PERF_COUNT_SW_SPF, 1,
+				      regs, address);
+			goto done;
+		}
+	}
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
 	 * kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -489,7 +506,16 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 		might_sleep();
 	}
 
-	vma = find_vma(mm, address);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	if (spf_vma) {
+		if (can_reuse_spf_vma(spf_vma, address))
+			vma = spf_vma;
+		else
+			vma = find_vma(mm, address);
+		spf_vma = NULL;
+	} else
+#endif
+	vma = find_vma(mm, address);
 	if (unlikely(!vma))
 		return bad_area(regs, address);
 	if (likely(vma->vm_start <= address))
@@ -568,6 +594,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	up_read(&current->mm->mmap_sem);
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+done:
+#endif
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
 
-- 
2.7.4
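
For reviewers: the control flow this change gives __do_page_fault()
condenses to roughly the sketch below. It is illustrative only, not
buildable code: handle_speculative_fault() and can_reuse_spf_vma() are
introduced by earlier patches in this series, the spf_flow_sketch()
wrapper is invented for illustration, handle_mm_fault() stands in for
the existing slow-path processing, and the real function's access
checks, retry loop and error paths are elided.

	/* Illustrative sketch only -- not buildable as-is.
	 * handle_speculative_fault() and can_reuse_spf_vma() come from
	 * earlier patches in this series.
	 */
	static int spf_flow_sketch(struct pt_regs *regs, unsigned long address,
				   unsigned int flags)
	{
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma, *spf_vma = NULL;
		int fault;

		/* Fast path: only worth trying when another thread may
		 * contend on the mmap_sem, i.e. mm_users > 1.
		 */
		if (user_mode(regs) && atomic_read(&mm->mm_users) > 1) {
			fault = handle_speculative_fault(mm, address, flags,
							 &spf_vma);
			if (!(fault & VM_FAULT_RETRY))
				goto done;	/* handled without mmap_sem */
		}

		/* Slow path: classic processing under mmap_sem. If the
		 * speculative attempt fetched a vma, reuse it while it is
		 * still valid instead of walking the tree again.
		 */
		down_read(&mm->mmap_sem);
		if (spf_vma && can_reuse_spf_vma(spf_vma, address))
			vma = spf_vma;
		else
			vma = find_vma(mm, address);
		fault = handle_mm_fault(vma, address, flags);
		up_read(&mm->mmap_sem);
	done:
		return fault;
	}

The mm_users check keeps single-threaded tasks on the classic path,
where the mmap_sem is uncontended and the speculative bookkeeping
would be pure overhead.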