4.4-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Borislav Petkov <bp@xxxxxxx>

commit b74a0cf1b3db30173eefa00c411775d2b1697700 upstream

Add an XSTATE_OP() macro which contains the XSAVE* fault handling
and replace all non-alternatives users of xstate_fault() with it.

This fixes also the buglet in copy_xregs_to_user() and
copy_user_to_xregs() where the inline asm didn't have @xstate as
memory reference and thus potentially causing unwanted reordering
of accesses to the extended state.

Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Quentin Casasnovas <quentin.casasnovas@xxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/1447932326-4371-2-git-send-email-bp@xxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Srivatsa S. Bhat <srivatsa@xxxxxxxxxxxxx>
Reviewed-by: Matt Helsley (VMware) <matt.helsley@xxxxxxxxx>
Reviewed-by: Alexey Makhalov <amakhalov@xxxxxxxxxx>
Reviewed-by: Bo Gan <ganb@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/fpu/internal.h |   68 ++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 37 deletions(-)

--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -238,6 +238,20 @@ static inline void copy_fxregs_to_kernel
 		     _ASM_EXTABLE(1b, 3b)			\
 		     : [_err] "=r" (__err)
 
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
@@ -247,22 +261,14 @@ static inline void copy_xregs_to_kernel_
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -277,22 +283,14 @@ static inline void copy_kernel_to_xregs_
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -389,12 +387,10 @@ static inline int copy_xregs_to_user(str
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
+
 	return err;
 }
 
@@ -406,14 +402,12 @@ static inline int copy_user_to_xregs(str
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
+
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
 	return err;
 }
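A note for reviewers on the "m" (*st) operand that XSTATE_OP() adds: it is
the heart of the reordering fix described in the commit message. The
standalone user-space sketch below is illustrative only -- XSTATE_OP_USER,
xsave_buf and xbuf are invented names for this example, and it assumes an
x86-64 toolchain plus a CPU/OS with XSAVE enabled -- but it shows the same
operand pattern: the save area appears as an explicit memory operand, so
the compiler knows the asm accesses the buffer rather than merely
receiving its address.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XSAVE demands a 64-byte-aligned save area; 4096 bytes covers common setups. */
struct xsave_buf {
	uint8_t bytes[4096];
} __attribute__((aligned(64)));

static struct xsave_buf xbuf;

/*
 * Same operand pattern as the kernel's XSTATE_OP(): the save area is an
 * explicit memory operand ("+m"), not merely a pointer in a register.
 * Without it, the compiler could assume the asm never touches *st and
 * reorder ordinary loads/stores to the buffer across the asm statement --
 * exactly the buglet the commit message describes.  (No fault fixup here;
 * user space has no kernel-style exception tables.)
 */
#define XSTATE_OP_USER(op, st, lmask, hmask)		\
	asm volatile(op " %[st]"			\
		     : [st] "+m" (*(st))		\
		     : "a" (lmask), "d" (hmask))

int main(void)
{
	uint64_t xstate_bv;

	memset(&xbuf, 0, sizeof(xbuf));

	/* EDX:EAX = 0:3 requests the x87 and SSE state components. */
	XSTATE_OP_USER("xsave", &xbuf, 3, 0);

	/* XSTATE_BV lives at offset 512, the start of the XSAVE header. */
	memcpy(&xstate_bv, xbuf.bytes + 512, sizeof(xstate_bv));
	printf("XSTATE_BV: %#llx\n", (unsigned long long)xstate_bv);
	return 0;
}

Build with something like "gcc -O2 -o xsave_demo xsave_demo.c" on x86-64
Linux. Unlike the kernel code above there is no _ASM_EXTABLE() fixup, so a
machine without XSAVE enabled would simply take an invalid-opcode fault
instead of returning -2 in err.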