On Sat, 2015-08-29 at 09:20 -0700, Andy Lutomirski wrote:
> If you haven't pushed this out yet, I would hold off.  I think it may
> be unnecessary.

Okay, I've dropped this revert from the 3.19-stable queue.

And for reference, your post: http://seclists.org/oss-sec/2015/q3/456

Thanks very much for the heads-up, Andy.

 -Kamal

> On Thu, Aug 27, 2015 at 3:12 PM, Kamal Mostafa <kamal@xxxxxxxxxxxxx> wrote:
> > 3.19.8-ckt6 -stable review patch.  If anyone has any objections, please let me know.
> >
> > ------------------
> >
> > From: Andy Lutomirski <luto@xxxxxxxxxx>
> >
> > commit 512255a2ad2c832ca7d4de9f31245f73781922d0 upstream.
> >
> > This reverts commit:
> >
> >   2c7577a75837 ("sched/x86_64: Don't save flags on context switch")
> >
> > It was a nice speedup.  It's also not quite correct: SYSENTER
> > enables interrupts too early.
> >
> > We can re-add this optimization once the SYSENTER code is beaten
> > into shape, which should happen in 4.3 or 4.4.
> >
> > Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
> > Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
> > Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> > Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> > Link: http://lkml.kernel.org/r/85f56651f59f76624e80785a8fd3bdfdd089a818.1439838962.git.luto@xxxxxxxxxx
> > Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
> > Signed-off-by: Kamal Mostafa <kamal@xxxxxxxxxxxxx>
> > ---
> >  arch/x86/include/asm/switch_to.h | 12 ++++--------
> >  1 file changed, 4 insertions(+), 8 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
> > index 751bf4b..d7f3b3b 100644
> > --- a/arch/x86/include/asm/switch_to.h
> > +++ b/arch/x86/include/asm/switch_to.h
> > @@ -79,12 +79,12 @@ do {                                                                   \
> >  #else /* CONFIG_X86_32 */
> >
> >  /* frame pointer must be last for get_wchan */
> > -#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
> > -#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
> > +#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
> > +#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
> >
> >  #define __EXTRA_CLOBBER  \
> >         , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
> > -         "r12", "r13", "r14", "r15", "flags"
> > +         "r12", "r13", "r14", "r15"
> >
> >  #ifdef CONFIG_CC_STACKPROTECTOR
> >  #define __switch_canary                                          \
> > @@ -100,11 +100,7 @@ do {                                                                   \
> >  #define __switch_canary_iparam
> >  #endif /* CC_STACKPROTECTOR */
> >
> > -/*
> > - * There is no need to save or restore flags, because flags are always
> > - * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
> > - * has no effect.
> > - */
> > +/* Save restore flags to clear handle leaking NT */
> >  #define switch_to(prev, next, last)                                      \
> >         asm volatile(SAVE_CONTEXT                                         \
> >              "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
> > --
> > 1.9.1
> >
>
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
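
[Editorial illustration, not part of the thread above.]  The NT (Nested Task)
bit the restored comment refers to lives in RFLAGS, the register that the
re-added pushf/popf pair in SAVE_CONTEXT/RESTORE_CONTEXT saves and restores
across switch_to().  Below is a minimal user-space sketch, assuming an x86-64
build with GCC or Clang, that reads RFLAGS the same way (pushfq/popq) and
reports whether NT happens to be set; the file name nt_check.c and the helper
read_rflags() are hypothetical, chosen only for this example.

/*
 * nt_check.c -- illustrative sketch only, not from the patch above.
 * Reads the current RFLAGS value from user space with pushfq/popq and
 * reports whether the NT (Nested Task, bit 14) flag is set.  The revert
 * restores pushf/popf around the context-switch asm so that flag bits
 * such as NT cannot leak from one task to another.
 */
#include <stdio.h>
#include <stdint.h>

static inline uint64_t read_rflags(void)
{
	uint64_t flags;

	/* Push RFLAGS onto the stack, then pop it into a general register. */
	asm volatile("pushfq ; popq %0" : "=r" (flags) : : "memory");
	return flags;
}

int main(void)
{
	uint64_t flags = read_rflags();

	printf("RFLAGS = %#llx, NT is %s\n",
	       (unsigned long long)flags,
	       (flags & (1ULL << 14)) ? "set" : "clear");
	return 0;
}

Build and run with, for example, "gcc -O2 -o nt_check nt_check.c && ./nt_check";
on a correctly behaving kernel NT should always report "clear".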