Performance tuning: single-threaded userspace does not benefit from speculative page faults, so we turn them off to avoid any related (small) extra overheads. Signed-off-by: Michel Lespinasse <michel@xxxxxxxxxxxxxx> --- arch/x86/mm/fault.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 48b86911a6df..b1a07ca82d59 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1318,6 +1318,10 @@ void do_user_addr_fault(struct pt_regs *regs, } #endif + /* Only try spf for multithreaded user space faults. */ + if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) + goto no_spf; + count_vm_event(SPF_ATTEMPT); seq = mmap_seq_read_start(mm); if (seq & 1) @@ -1351,6 +1355,7 @@ void do_user_addr_fault(struct pt_regs *regs, spf_abort: count_vm_event(SPF_ABORT); +no_spf: /* * Kernel-mode access to the user address space should only occur -- 2.20.1