The ia32_fxstate case (32bit with fxsr) and the other case (64bit, or
32bit without fxsr) both restore from kernel memory and sanitize the
content. The !ia32_fxstate version restores missing xstates from the
"init state", while the ia32_fxstate version skips that step.

Merge the two code paths and keep the !ia32_fxstate version. Copy only
the user_i387_ia32_struct data structure in the ia32_fxstate case.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
---
 arch/x86/kernel/fpu/signal.c | 162 ++++++++++++++---------------------
 1 file changed, 65 insertions(+), 97 deletions(-)

diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 4ed5c400cac58..a17e75fa1a0a6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -224,12 +224,17 @@ sanitize_restored_xstate(union fpregs_state *state,
  */
 static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 {
+	struct user_i387_ia32_struct *envp = NULL;
 	int ia32_fxstate = (buf != buf_fx);
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 	int state_size = fpu_kernel_xstate_size;
+	struct user_i387_ia32_struct env;
+	union fpregs_state *state;
 	u64 xfeatures = 0;
 	int fx_only = 0;
+	int ret = 0;
+	void *tmp;
 
 	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
 			 IS_ENABLED(CONFIG_IA32_EMULATION));
@@ -264,106 +269,69 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		}
 	}
 
+	tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
+	state = PTR_ALIGN(tmp, 64);
+
+	if ((unsigned long)buf_fx % 64)
+		fx_only = 1;
+
+	/*
+	 * For 32-bit frames with fxstate, copy the fxstate so it can be
+	 * reconstructed later.
+	 */
 	if (ia32_fxstate) {
-		/*
-		 * For 32-bit frames with fxstate, copy the user state to the
-		 * thread's fpu state, reconstruct fxstate from the fsave
-		 * header. Validate and sanitize the copied state.
-		 */
-		struct user_i387_ia32_struct env;
-		union fpregs_state *state;
-		int err = 0;
-		void *tmp;
-
-		tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-		if (!tmp)
-			return -ENOMEM;
-		state = PTR_ALIGN(tmp, 64);
-
-		if (using_compacted_format()) {
-			err = copy_user_to_xstate(&state->xsave, buf_fx);
-		} else {
-			err = __copy_from_user(&state->xsave, buf_fx, state_size);
-
-			if (!err && state_size > offsetof(struct xregs_state, header))
-				err = validate_xstate_header(&state->xsave.header);
-		}
-
-		if (err || __copy_from_user(&env, buf, sizeof(env))) {
-			err = -1;
-		} else {
-			sanitize_restored_xstate(state, &env,
-						 xfeatures, fx_only);
-			copy_kernel_to_fpregs(state);
-		}
-
-		kfree(tmp);
-		return err;
-	} else {
-		union fpregs_state *state;
-		void *tmp;
-		int ret;
-
-		tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
-		if (!tmp)
-			return -ENOMEM;
-		state = PTR_ALIGN(tmp, 64);
-
-		/*
-		 * For 64-bit frames and 32-bit fsave frames, restore the user
-		 * state to the registers directly (with exceptions handled).
-		 */
-		if ((unsigned long)buf_fx % 64)
-			fx_only = 1;
-
-		if (use_xsave() && !fx_only) {
-			u64 init_bv = xfeatures_mask & ~xfeatures;
-
-			if (using_compacted_format()) {
-				ret = copy_user_to_xstate(&state->xsave, buf_fx);
-			} else {
-				ret = __copy_from_user(&state->xsave, buf_fx, state_size);
-
-				if (!ret && state_size > offsetof(struct xregs_state, header))
-					ret = validate_xstate_header(&state->xsave.header);
-			}
-			if (ret)
-				goto err_out;
-			sanitize_restored_xstate(state, NULL, xfeatures,
-						 fx_only);
-
-			if (unlikely(init_bv))
-				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-			ret = copy_users_to_xregs(&state->xsave, xfeatures);
-
-		} else if (use_fxsr()) {
-			ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
-			if (ret)
-				goto err_out;
-
-			if (use_xsave()) {
-				u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-			}
-			state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-			ret = copy_users_to_fxregs(&state->fxsave);
-		} else {
-			ret = __copy_from_user(&state->fsave, buf_fx, state_size);
-			if (ret)
-				goto err_out;
-			ret = copy_users_to_fregs(buf_fx);
-		}
-
-err_out:
-		kfree(tmp);
-		if (ret) {
-			fpu__clear(fpu);
-			return -1;
-		}
+		ret = __copy_from_user(&env, buf, sizeof(env));
+		if (ret)
+			goto err_out;
+		envp = &env;
 	}
 
-	return 0;
+	if (use_xsave() && !fx_only) {
+		u64 init_bv = xfeatures_mask & ~xfeatures;
+
+		if (using_compacted_format()) {
+			ret = copy_user_to_xstate(&state->xsave, buf_fx);
+		} else {
+			ret = __copy_from_user(&state->xsave, buf_fx, state_size);
+
+			if (!ret && state_size > offsetof(struct xregs_state, header))
+				ret = validate_xstate_header(&state->xsave.header);
+		}
+		if (ret)
+			goto err_out;
+
+		sanitize_restored_xstate(state, envp, xfeatures, fx_only);
+
+		if (unlikely(init_bv))
+			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+		ret = copy_users_to_xregs(&state->xsave, xfeatures);
+
+	} else if (use_fxsr()) {
+		ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
+		if (ret)
+			goto err_out;
+
+		sanitize_restored_xstate(state, envp, xfeatures, fx_only);
+		if (use_xsave()) {
+			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+		}
+
+		ret = copy_users_to_fxregs(&state->fxsave);
+	} else {
+		ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+		if (ret)
+			goto err_out;
+		ret = copy_users_to_fregs(buf_fx);
+	}
+
+err_out:
+	kfree(tmp);
+	if (ret)
+		fpu__clear(fpu);
+	return ret;
 }
 
 static inline int xstate_sigframe_size(void)
-- 
2.20.1
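A note on the shape of the merge: the only per-case difference left is the
optional user_i387_ia32_struct. The ia32_fxstate path fills env and passes
&env, every other path passes NULL, and sanitize_restored_xstate() is
expected to do the extra 32-bit reconstruction only when the pointer is
non-NULL (its body is not part of this hunk, so that behaviour is an
assumption here). A minimal, self-contained sketch of that nullable-pointer
pattern, with made-up names:

#include <stdio.h>

/* Stand-ins for the real structures; names are illustrative only. */
struct demo_state { unsigned int mxcsr; };
struct demo_env   { int from_32bit_frame; };

/* One sanitize routine serves every caller; the 32-bit-only work is
 * keyed off whether an env was supplied. */
static void demo_sanitize(struct demo_state *st, const struct demo_env *env)
{
	st->mxcsr &= 0xffff;		/* common sanitizing for all callers */
	if (env)
		printf("extra reconstruction for the 32-bit frame\n");
}

int main(void)
{
	struct demo_state st = { .mxcsr = 0x1f80 };
	struct demo_env env = { .from_32bit_frame = 1 };

	demo_sanitize(&st, NULL);	/* 64-bit / fsave-style caller */
	demo_sanitize(&st, &env);	/* ia32_fxstate-style caller */
	return 0;
}

Folding the difference into one nullable argument is what lets the error
handling, allocation, and register-restore branches collapse into a single
err_out path.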
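One more detail worth calling out: the merged path now always allocates the
scratch state with 64 bytes of slack and rounds the pointer up with
PTR_ALIGN(), because an XSAVE area handed to XRSTOR must be 64-byte aligned.
Below is a small userspace sketch of the same over-allocate-and-align idiom;
the ALIGN_UP/PTR_ALIGN macros are re-derived here for illustration and the
payload size is made up, so treat it as a sketch of the pattern rather than
kernel code.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Userspace re-derivation of the kernel's align-up helpers: round x up to
 * the next multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)   (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN(p, a)  ((void *)ALIGN_UP((uintptr_t)(p), (a)))

int main(void)
{
	size_t payload = 1000;	/* stand-in for fpu_kernel_xstate_size */
	void *tmp, *state;

	/* Over-allocate by 64 bytes so a 64-byte aligned pointer always
	 * fits inside the allocation, then round up. */
	tmp = calloc(1, payload + 64);
	if (!tmp)
		return 1;
	state = PTR_ALIGN(tmp, 64);

	printf("raw=%p aligned=%p offset=%zu\n",
	       tmp, state, (size_t)((char *)state - (char *)tmp));

	free(tmp);	/* free the original pointer, not the aligned alias */
	return 0;
}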