On Wed, 3 Apr 2019, Sebastian Andrzej Siewior wrote:
> The previous commits refactor the restoration of the FPU registers so
> that they can be loaded from in-kernel memory. This overhead can be
> avoided if the load can be performed without a pagefault.
>
> Attempt to restore FPU registers by invoking
> copy_user_to_fpregs_zeroing(). If it fails try the slowpath which can handle
> pagefaults.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
> ---
>  arch/x86/kernel/fpu/signal.c | 16 ++++++++++++++--
>  1 file changed, 14 insertions(+), 2 deletions(-)
>
> diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
> index a5b086ec426a5..f20e1d1fffa29 100644
> --- a/arch/x86/kernel/fpu/signal.c
> +++ b/arch/x86/kernel/fpu/signal.c
> @@ -242,10 +242,10 @@ sanitize_restored_xstate(union fpregs_state *state,
>  /*
>   * Restore the extended state if present. Otherwise, restore the FP/SSE state.
>   */
> -static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
> +static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
>  {
>  	if (use_xsave()) {
> -		if ((unsigned long)buf % 64 || fx_only) {
> +		if (fx_only) {

This change is weird and not mentioned in the changelog....

>  			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
>  			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
>  			return copy_user_to_fxregs(buf);
> @@ -327,7 +327,19 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
>  		if (ret)
>  			goto err_out;
>  		envp = &env;
> +	} else {
> +		fpregs_lock();
> +		pagefault_disable();
> +		ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
> +		pagefault_enable();
> +		if (!ret) {
> +			fpregs_mark_activate();
> +			fpregs_unlock();
> +			return 0;
> +		}
> +		fpregs_unlock();
> 	}
> +
>  	if (use_xsave() && !fx_only) {
> 		u64 init_bv = xfeatures_mask & ~xfeatures;
>
> --
> 2.20.1
>
>
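For readers following the control flow that the second hunk adds, here is a minimal userspace sketch of the pattern the changelog describes: attempt a restore that must not take a page fault, and fall back to the slower, fault-tolerant path if it fails. The names try_restore_nofault() and restore_slowpath() are hypothetical stand-ins for copy_user_to_fpregs_zeroing() under pagefault_disable() and the existing copy-to-kernel-buffer path; this is not kernel code.

/* Build with: gcc -O2 -o fastpath fastpath.c */
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical fast path: must not fault, may simply report failure. */
static bool try_restore_nofault(const void *buf)
{
	/* Pretend the backing page is not resident, so the fast path fails. */
	(void)buf;
	return false;
}

/* Hypothetical slow path: allowed to take page faults, so it always works. */
static int restore_slowpath(const void *buf)
{
	(void)buf;
	return 0;
}

int main(void)
{
	/* 64-byte alignment mirrors what XSAVE-format buffers normally need. */
	unsigned char frame[64] __attribute__((aligned(64)));

	if (try_restore_nofault(frame)) {
		puts("restored via fast path");
		return 0;
	}
	puts("fast path failed, falling back to slow path");
	return restore_slowpath(frame);
}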