This is a note to let you know that I've just added the patch titled

    [Variant 1/Spectre-v1] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user

to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
and it can be found in the queue-4.14 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Wed Feb 14 14:44:54 CET 2018
From: Will Deacon <will.deacon@xxxxxxx>
Date: Mon, 5 Feb 2018 15:34:23 +0000
Subject: [Variant 1/Spectre-v1] arm64: uaccess: Mask __user pointers for __arch_{clear, copy_*}_user

From: Will Deacon <will.deacon@xxxxxxx>


Commit f71c2ffcb20d upstream.

Like we've done for get_user and put_user, ensure that user pointers
are masked before invoking the underlying __arch_{clear,copy_*}_user
operations.

Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/arm64/include/asm/uaccess.h |   29 ++++++++++++++++++++++-------
 arch/arm64/kernel/arm64ksyms.c   |    4 ++--
 arch/arm64/lib/clear_user.S      |    6 +++---
 arch/arm64/lib/copy_in_user.S    |    5 +++--
 4 files changed, 30 insertions(+), 14 deletions(-)

--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -391,20 +391,35 @@ do {									\
 #define put_user	__put_user
 
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_from_user(to, from, n)					\
+({									\
+	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+})
+
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define raw_copy_to_user __arch_copy_to_user
-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#define raw_copy_to_user(to, from, n)					\
+({									\
+	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+})
+
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_in_user(to, from, n)					\
+({									\
+	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
+			    __uaccess_mask_ptr(from), (n));		\
+})
+
 #define INLINE_COPY_TO_USER
 #define INLINE_COPY_FROM_USER
 
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
 	if (access_ok(VERIFY_WRITE, to, n))
-		n = __clear_user(__uaccess_mask_ptr(to), n);
+		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
 	return n;
 }
+#define clear_user	__clear_user
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
@@ -418,7 +433,7 @@ extern unsigned long __must_check __copy
 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
 {
 	kasan_check_write(dst, size);
-	return __copy_user_flushcache(dst, src, size);
+	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
 }
 #endif
 
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
 	/* user mem (segment) */
 EXPORT_SYMBOL(__arch_copy_from_user);
 EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
 
 	/* physical memory */
 EXPORT_SYMBOL(memstart_addr);
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -21,7 +21,7 @@
 
 	.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
@@ -29,7 +29,7 @@
  *
  * Alignment fixed up by hardware.
  */
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
 	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wz
 5:	mov	x0, #0
 	uaccess_disable_not_uao x2, x3
 	ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
 
 	.section .fixup,"ax"
 	.align	2
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,15 @@
 	.endm
 
 end	.req	x5
-ENTRY(raw_copy_in_user)
+
+ENTRY(__arch_copy_in_user)
 	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
 	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
-ENDPROC(raw_copy_in_user)
+ENDPROC(__arch_copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2


Patches currently in stable-queue which might be from will.deacon@xxxxxxx are

queue-4.14/arm64-make-user_ds-an-inclusive-limit.patch
queue-4.14/arm64-mm-remove-pre_ttbr0_update_workaround-for-falkor-erratum-e1003.patch
queue-4.14/arm64-uaccess-don-t-bother-eliding-access_ok-checks-in-__-get-put-_user.patch
queue-4.14/arm64-cpufeature-pass-capability-structure-to-enable-callback.patch
queue-4.14/arm64-uaccess-mask-__user-pointers-for-__arch_-clear-copy_-_user.patch
queue-4.14/arm64-mm-add-arm64_kernel_unmapped_at_el0-helper.patch
queue-4.14/arm64-entry-reword-comment-about-post_ttbr_update_workaround.patch
queue-4.14/arm64-kaslr-put-kernel-vectors-address-in-separate-data-page.patch
queue-4.14/arm-arm64-smccc-make-function-identifiers-an-unsigned-quantity.patch
queue-4.14/arm64-move-bp-hardening-to-check_and_switch_context.patch
queue-4.14/arm-arm64-kvm-advertise-smccc-v1.1.patch
queue-4.14/arm64-move-post_ttbr_update_workaround-to-c-code.patch
queue-4.14/arm64-turn-on-kpti-only-on-cpus-that-need-it.patch
queue-4.14/firmware-psci-expose-psci-conduit.patch
queue-4.14/arm64-force-kpti-to-be-disabled-on-cavium-thunderx.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-high-priority-synchronous-exceptions.patch
queue-4.14/arm64-kpti-fix-the-interaction-between-asid-switching-and-software-pan.patch
queue-4.14/firmware-psci-expose-smccc-version-through-psci_ops.patch
queue-4.14/arm64-mm-permit-transitioning-from-global-to-non-global-without-bbm.patch
queue-4.14/arm64-mm-allocate-asids-in-pairs.patch
queue-4.14/arm64-tls-avoid-unconditional-zeroing-of-tpidrro_el0-for-native-tasks.patch
queue-4.14/arm64-use-ret-instruction-for-exiting-the-trampoline.patch
queue-4.14/arm64-futex-mask-__user-pointers-prior-to-dereference.patch
queue-4.14/arm64-entry-explicitly-pass-exception-level-to-kernel_ventry-macro.patch
queue-4.14/arm64-implement-branch-predictor-hardening-for-affected-cortex-a-cpus.patch
queue-4.14/arm64-kpti-make-use-of-ng-dependent-on-arm64_kernel_unmapped_at_el0.patch
queue-4.14/arm-arm64-kvm-add-psci_version-helper.patch
queue-4.14/arm64-kill-psci_get_version-as-a-variant-2-workaround.patch
queue-4.14/arm64-entry-ensure-branch-through-syscall-table-is-bounded-under-speculation.patch
queue-4.14/arm64-mm-use-non-global-mappings-for-kernel-space.patch
queue-4.14/arm64-entry-apply-bp-hardening-for-suspicious-interrupts-from-el0.patch
queue-4.14/arm64-capabilities-handle-duplicate-entries-for-a-capability.patch
queue-4.14/arm64-entry-hook-up-entry-trampoline-to-exception-vectors.patch
queue-4.14/arm64-branch-predictor-hardening-for-cavium-thunderx2.patch
queue-4.14/arm64-uaccess-prevent-speculative-use-of-the-current-addr_limit.patch
queue-4.14/.arm64-add-software-workaround-for-falkor-erratum-1041.patch.swp
queue-4.14/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch
queue-4.14/arm64-add-arm_smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm64-erratum-work-around-falkor-erratum-e1003-in-trampoline-code.patch
queue-4.14/arm64-mm-fix-and-re-enable-arm64_sw_ttbr0_pan.patch
queue-4.14/arm64-mm-invalidate-both-kernel-and-user-asids-when-performing-tlbi.patch
queue-4.14/drivers-firmware-expose-psci_get_version-through-psci_ops-structure.patch
queue-4.14/arm64-mm-rename-post_ttbr0_update_workaround.patch
queue-4.14/arm64-mm-map-entry-trampoline-into-trampoline-and-kernel-page-tables.patch
queue-4.14/arm-arm64-kvm-turn-kvm_psci_version-into-a-static-inline.patch
queue-4.14/arm-arm64-kvm-implement-psci-1.0-support.patch
queue-4.14/arm64-move-task_-definitions-to-asm-processor.h.patch
queue-4.14/arm64-kconfig-reword-unmap_kernel_at_el0-kconfig-entry.patch
queue-4.14/arm64-mm-move-asid-from-ttbr0-to-ttbr1.patch
queue-4.14/arm64-mm-introduce-ttbr_asid_mask-for-getting-at-the-asid-in-the-ttbr.patch
queue-4.14/arm64-kvm-add-smccc_arch_workaround_1-fast-handling.patch
queue-4.14/arm64-take-into-account-id_aa64pfr0_el1.csv3.patch
queue-4.14/arm64-cputype-add-missing-midr-values-for-cortex-a72-and-cortex-a75.patch
queue-4.14/arm64-kvm-report-smccc_arch_workaround_1-bp-hardening-support.patch
queue-4.14/arm64-barrier-add-csdb-macros-to-control-data-value-prediction.patch
queue-4.14/arm-arm64-smccc-implement-smccc-v1.1-inline-primitive.patch
queue-4.14/arm64-implement-branch-predictor-hardening-for-falkor.patch
queue-4.14/arm64-kconfig-add-config_unmap_kernel_at_el0.patch
queue-4.14/arm64-add-software-workaround-for-falkor-erratum-1041.patch
queue-4.14/arm64-idmap-use-awx-flags-for-.idmap.text-.pushsection-directives.patch
queue-4.14/arm64-run-enable-method-for-errata-work-arounds-on-late-cpus.patch
queue-4.14/arm64-mm-temporarily-disable-arm64_sw_ttbr0_pan.patch
queue-4.14/arm64-entry-add-exception-trampoline-page-for-exceptions-from-el0.patch
queue-4.14/arm64-kvm-make-psci_version-a-fast-path.patch
queue-4.14/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-early.patch
queue-4.14/arm64-implement-array_index_mask_nospec.patch
queue-4.14/arm64-kpti-add-enable-callback-to-remap-swapper-using-ng-mappings.patch
queue-4.14/arm-arm64-kvm-consolidate-the-psci-include-files.patch
queue-4.14/arm64-add-skeleton-to-harden-the-branch-predictor-against-aliasing-attacks.patch
queue-4.14/arm64-define-cputype-macros-for-falkor-cpu.patch
queue-4.14/arm64-cpu_errata-add-kryo-to-falkor-1003-errata.patch
queue-4.14/arm-arm64-kvm-add-smccc-accessors-to-psci-code.patch
queue-4.14/arm64-entry-add-fake-cpu-feature-for-unmapping-the-kernel-at-el0.patch
queue-4.14/arm64-kvm-use-per-cpu-vector-when-bp-hardening-is-enabled.patch
queue-4.14/arm64-cputype-add-midr-values-for-cavium-thunderx2-cpus.patch
queue-4.14/arm64-kvm-increment-pc-after-handling-an-smc-trap.patch
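
The masking in this patch relies on the __uaccess_mask_ptr() helper added by
queue-4.14/arm64-use-pointer-masking-to-limit-uaccess-speculation.patch (listed
above), which sanitises a user pointer so that it becomes NULL if it lies above
the current addr_limit; that way a mispredicted access_ok() check cannot steer
a speculative copy to an attacker-chosen kernel address. The snippet below is
only a minimal, architecture-neutral C sketch of that idea, not the kernel's
implementation (which uses inline assembly and the CSDB speculation barrier to
keep the sequence branch-free); the names mask_user_ptr and ADDR_LIMIT are
illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the task's user-space limit (e.g. TASK_SIZE_64). */
#define ADDR_LIMIT	0x0000ffffffffffffUL

/*
 * Branch-free sketch of the masking idea: if the pointer lies above
 * ADDR_LIMIT, the computed mask is all-zeroes and the pointer collapses
 * to NULL, so even a speculatively executed copy cannot dereference a
 * kernel address.
 */
static void *mask_user_ptr(void *ptr)
{
	uintptr_t p = (uintptr_t)ptr;
	/* all-ones when p <= ADDR_LIMIT, all-zeroes otherwise */
	uintptr_t mask = (uintptr_t)0 - (uintptr_t)(p <= ADDR_LIMIT);

	return (void *)(p & mask);
}

int main(void)
{
	void *user_ptr   = (void *)0x00007fff00001000UL;	/* plausible user address */
	void *kernel_ptr = (void *)0xffff000012345678UL;	/* out-of-range address   */

	printf("%p -> %p\n", user_ptr, mask_user_ptr(user_ptr));	/* unchanged      */
	printf("%p -> %p\n", kernel_ptr, mask_user_ptr(kernel_ptr));	/* masked to NULL */
	return 0;
}

This mirrors what the raw_copy_{from,to,in}_user() and __clear_user() wrappers
in the patch do: the __user pointer handed to the __arch_* routines is
sanitised up front rather than guarded only by a branch that speculative
execution could bypass.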