This is a note to let you know that I've just added the patch titled

    x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec

to the 4.15-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     x86uaccess_Use___uaccess_begin_nospec()_and_uaccess_try_nospec.patch
and it can be found in the queue-4.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


Subject: x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec
From: Dan Williams <dan.j.williams@xxxxxxxxx>
Date: Mon Jan 29 17:02:49 2018 -0800

From: Dan Williams <dan.j.williams@xxxxxxxxx>

commit 304ec1b050310548db33063e567123fae8fd0301

Quoting Linus:

    I do think that it would be a good idea to very expressly document
    the fact that it's not that the user access itself is unsafe. I do
    agree that things like "get_user()" want to be protected, but not
    because of any direct bugs or problems with get_user() and friends,
    but simply because get_user() is an excellent source of a pointer
    that is obviously controlled from a potentially attacking user
    space. So it's a prime candidate for then finding _subsequent_
    accesses that can then be used to perturb the cache.

__uaccess_begin_nospec() covers __get_user() and copy_from_iter() where the
limit check is far away from the user pointer de-reference. In those cases a
barrier_nospec() prevents speculation with a potential pointer to privileged
memory.

uaccess_try_nospec covers get_user_try.

Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Suggested-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: kernel-hardening@xxxxxxxxxxxxxxxxxx
Cc: gregkh@xxxxxxxxxxxxxxxxxxx
Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: alan@xxxxxxxxxxxxxxx
Link: https://lkml.kernel.org/r/151727416953.33451.10508284228526170604.stgit@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/uaccess.h    |    6 +++---
 arch/x86/include/asm/uaccess_32.h |    6 +++---
 arch/x86/include/asm/uaccess_64.h |   12 ++++++------
 arch/x86/lib/usercopy_32.c        |    4 ++--
 4 files changed, 14 insertions(+), 14 deletions(-)

--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -450,7 +450,7 @@ do {									\
 ({									\
 	int __gu_err;							\
 	__inttype(*(ptr)) __gu_val;					\
-	__uaccess_begin();						\
+	__uaccess_begin_nospec();					\
 	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
 	__uaccess_end();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
@@ -557,7 +557,7 @@ struct __large_struct { unsigned long bu
  *	get_user_ex(...);
  * } get_user_catch(err)
  */
-#define get_user_try		uaccess_try
+#define get_user_try		uaccess_try_nospec
 #define get_user_catch(err)	uaccess_catch(err)
 
 #define get_user_ex(x, ptr)	do {					\
@@ -591,7 +591,7 @@ extern void __cmpxchg_wrong_size(void)
 	__typeof__(ptr) __uval = (uval);				\
 	__typeof__(*(ptr)) __old = (old);				\
 	__typeof__(*(ptr)) __new = (new);				\
-	__uaccess_begin();						\
+	__uaccess_begin_nospec();					\
 	switch (size) {							\
 	case 1:								\
 	{								\
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -29,21 +29,21 @@ raw_copy_from_user(void *to, const void
 	switch (n) {
 	case 1:
 		ret = 0;
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u8 *)to, from, ret,
 				      "b", "b", "=q", 1);
 		__uaccess_end();
 		return ret;
 	case 2:
 		ret = 0;
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u16 *)to, from, ret,
 				      "w", "w", "=r", 2);
 		__uaccess_end();
 		return ret;
 	case 4:
 		ret = 0;
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u32 *)to, from, ret,
 				      "l", "k", "=r", 4);
 		__uaccess_end();
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -55,31 +55,31 @@ raw_copy_from_user(void *dst, const void
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
 	case 1:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, ret,
 			      "b", "b", "=q", 1);
 		__uaccess_end();
 		return ret;
 	case 2:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, ret,
 			      "w", "w", "=r", 2);
 		__uaccess_end();
 		return ret;
 	case 4:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, ret,
 			      "l", "k", "=r", 4);
 		__uaccess_end();
 		return ret;
 	case 8:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret,
 			      "q", "", "=r", 8);
 		__uaccess_end();
 		return ret;
 	case 10:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret,
 			       "q", "", "=r", 10);
 		if (likely(!ret))
@@ -89,7 +89,7 @@ raw_copy_from_user(void *dst, const void
 		__uaccess_end();
 		return ret;
 	case 16:
-		__uaccess_begin();
+		__uaccess_begin_nospec();
 		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret,
 			       "q", "", "=r", 16);
 		if (likely(!ret))
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -331,7 +331,7 @@ do {									\
 
 unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
-	__uaccess_begin();
+	__uaccess_begin_nospec();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
-	__uaccess_begin();
+	__uaccess_begin_nospec();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
 		n = __copy_user_intel_nocache(to, from, n);
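
For context, the helpers this patch switches to are introduced earlier in this
queue by x86_Introduce_barrier_nospec.patch and
x86_Introduce___uaccess_begin_nospec()_and_uaccess_try_nospec.patch. The sketch
below is only an approximation of what they expand to, written from memory of
the upstream series; the hunks actually applied by those companion patches in
arch/x86/include/asm/uaccess.h and arch/x86/include/asm/barrier.h are
authoritative.

/*
 * Approximate shape of the nospec uaccess helpers (sketch, not the
 * literal hunks from the companion patches in this queue).
 *
 * barrier_nospec() resolves to an LFENCE/MFENCE alternative that keeps
 * the CPU from speculating past the user-pointer bounds check before
 * the user pointer is dereferenced.
 */
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * Exception-table variant used by get_user_try blocks: clear the
 * per-thread uaccess error state, then open the access window with the
 * speculation barrier in place.  It is paired with uaccess_catch(err),
 * which supplies the closing "} while (0)".
 */
#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();
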
Patches currently in stable-queue which might be from torvalds@xxxxxxxxxxxxxxxxxxxx are

queue-4.15/objtool_Add_support_for_alternatives_at_the_end_of_a_section.patch
queue-4.15/x86pti_Do_not_enable_PTI_on_CPUs_which_are_not_vulnerable_to_Meltdown.patch
queue-4.15/x86_Introduce_barrier_nospec.patch
queue-4.15/x86speculation_Use_Indirect_Branch_Prediction_Barrier_in_context_switch.patch
queue-4.15/x86get_user_Use_pointer_masking_to_limit_speculation.patch
queue-4.15/x86_Introduce___uaccess_begin_nospec()_and_uaccess_try_nospec.patch
queue-4.15/x86cpufeature_Blacklist_SPEC_CTRLPRED_CMD_on_early_Spectre_v2_microcodes.patch
queue-4.15/x86cpufeatures_Add_Intel_feature_bits_for_Speculation_Control.patch
queue-4.15/x86paravirt_Remove_noreplace-paravirt_cmdline_option.patch
queue-4.15/KVM_VMX_Make_indirect_call_speculation_safe.patch
queue-4.15/x86msr_Add_definitions_for_new_speculation_control_MSRs.patch
queue-4.15/x86alternative_Print_unadorned_pointers.patch
queue-4.15/KVMVMX_Allow_direct_access_to_MSR_IA32_SPEC_CTRL.patch
queue-4.15/x86cpufeatures_Add_CPUID_7_EDX_CPUID_leaf.patch
queue-4.15/array_index_nospec_Sanitize_speculative_array_de-references.patch
queue-4.15/Documentation_Document_array_index_nospec.patch
queue-4.15/x86entry64_Remove_the_SYSCALL64_fast_path.patch
queue-4.15/x86bugs_Drop_one_mitigation_from_dmesg.patch
queue-4.15/x86cpufeatures_Add_AMD_feature_bits_for_Speculation_Control.patch
queue-4.15/KVMSVM_Allow_direct_access_to_MSR_IA32_SPEC_CTRL.patch
queue-4.15/x86asm_Move_status_from_thread_struct_to_thread_info.patch
queue-4.15/KVMx86_Add_IBPB_support.patch
queue-4.15/x86_Implement_array_index_mask_nospec.patch
queue-4.15/KVMVMX_Emulate_MSR_IA32_ARCH_CAPABILITIES.patch
queue-4.15/nl80211_Sanitize_array_index_in_parse_txq_params.patch
queue-4.15/moduleretpoline_Warn_about_missing_retpoline_in_module.patch
queue-4.15/x86speculation_Add_basic_IBPB_(Indirect_Branch_Prediction_Barrier)_support.patch
queue-4.15/x86speculation_Simplify_indirect_branch_prediction_barrier().patch
queue-4.15/x86nospec_Fix_header_guards_names.patch
queue-4.15/KVM_x86_Make_indirect_calls_in_emulator_speculation_safe.patch
queue-4.15/x86uaccess_Use___uaccess_begin_nospec()_and_uaccess_try_nospec.patch
queue-4.15/x86entry64_Push_extra_regs_right_away.patch
queue-4.15/x86usercopy_Replace_open_coded_stacclac_with___uaccess_begin_end.patch
queue-4.15/vfs_fdtable_Prevent_bounds-check_bypass_via_speculative_execution.patch
queue-4.15/x86retpoline_Simplify_vmexit_fill_RSB().patch
queue-4.15/objtool_Warn_on_stripped_section_symbol.patch
queue-4.15/x86spectre_Report_get_user_mitigation_for_spectre_v1.patch
queue-4.15/x86cpufeatures_Clean_up_Spectre_v2_related_CPUID_flags.patch
queue-4.15/x86syscall_Sanitize_syscall_table_de-references_under_speculation.patch
queue-4.15/objtool_Improve_retpoline_alternative_handling.patch