From: Yu-cheng Yu <yu-cheng.yu@xxxxxxxxx>

Shadow stacks are normally written to via CALL/RET or specific CET
instructions like RSTORSSP/SAVEPREVSSP. However, during some Linux
operations the kernel will need to write to them directly using the
ring-0 only WRUSS instruction.

A shadow stack restore token marks a restore point of the shadow stack,
and the address in a token must point directly above the token, which is
within the same shadow stack. This is distinctly different from other
pointers on the shadow stack, since those pointers point to an executable
code area.

Introduce token setup and verify routines. Also introduce WRUSS, which is
a kernel-mode instruction but writes directly to the user shadow stack.

In future patches that enable shadow stack to work with signals, the
kernel will need something to denote the point in the stack where
sigreturn may be called. This will prevent attackers from calling
sigreturn at arbitrary places in the stack, which helps defend against
SROP attacks.

To do this, something that can only be written by the kernel needs to be
placed on the shadow stack. This can be accomplished by setting bit 63 in
the frame written to the shadow stack. Userspace return addresses can't
have this bit set as it is in the kernel range. It also can't be a valid
restore token.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@xxxxxxxxx>
Co-developed-by: Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@xxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>

---

v2:
 - Add data helpers for writing to shadow stack.

v1:
 - Use xsave helpers.

Yu-cheng v30:
 - Update commit log, remove description about signals.
 - Update various comments.
 - Remove variable 'ssp' init and adjust return value accordingly.
 - Check get_user_shstk_addr() return value.
 - Replace 'ia32' with 'proc32'.

Yu-cheng v29:
 - Update comments for the use of get_xsave_addr().

 arch/x86/include/asm/special_insns.h |  13 ++++
 arch/x86/kernel/shstk.c              | 108 +++++++++++++++++++++++++++
 2 files changed, 121 insertions(+)

diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 35f709f619fb..f096f52bd059 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -223,6 +223,19 @@ static inline void clwb(volatile void *__p)
 		: [pax] "a" (p));
 }
 
+#ifdef CONFIG_X86_SHADOW_STACK
+static inline int write_user_shstk_64(u64 __user *addr, u64 val)
+{
+	asm_volatile_goto("1: wrussq %[val], (%[addr])\n"
+			  _ASM_EXTABLE(1b, %l[fail])
+			  :: [addr] "r" (addr), [val] "r" (val)
+			  :: fail);
+	return 0;
+fail:
+	return -EFAULT;
+}
+#endif /* CONFIG_X86_SHADOW_STACK */
+
 #define nop() asm volatile ("nop")
 
 static inline void serialize(void)
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index db4e53f9fdaf..8904aef487bf 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -25,6 +25,8 @@
 #include <asm/fpu/api.h>
 #include <asm/prctl.h>
 
+#define SS_FRAME_SIZE		8
+
 static bool feature_enabled(unsigned long features)
 {
 	return current->thread.features & features;
@@ -40,6 +42,31 @@ static void feature_clr(unsigned long features)
 	current->thread.features &= ~features;
 }
 
+/*
+ * Create a restore token on the shadow stack.  A token is always 8-byte
+ * and aligned to 8.
+ */
+static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
+{
+	unsigned long addr;
+
+	/* Token must be aligned */
+	if (!IS_ALIGNED(ssp, 8))
+		return -EINVAL;
+
+	addr = ssp - SS_FRAME_SIZE;
+
+	/* Mark the token 64-bit */
+	ssp |= BIT(0);
+
+	if (write_user_shstk_64((u64 __user *)addr, (u64)ssp))
+		return -EFAULT;
+
+	*token_addr = addr;
+
+	return 0;
+}
+
 static unsigned long alloc_shstk(unsigned long size)
 {
 	int flags = MAP_ANONYMOUS | MAP_PRIVATE;
@@ -158,6 +185,87 @@ int shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long clone_flags,
 	return 0;
 }
 
+static unsigned long get_user_shstk_addr(void)
+{
+	unsigned long long ssp;
+
+	fpu_lock_and_load();
+
+	rdmsrl(MSR_IA32_PL3_SSP, ssp);
+
+	fpregs_unlock();
+
+	return ssp;
+}
+
+static int put_shstk_data(u64 __user *addr, u64 data)
+{
+	WARN_ON(data & BIT(63));
+
+	/*
+	 * Mark the high bit so that the sigframe can't be processed as a
+	 * return address.
+	 */
+	if (write_user_shstk_64(addr, data | BIT(63)))
+		return -EFAULT;
+	return 0;
+}
+
+static int get_shstk_data(unsigned long *data, unsigned long __user *addr)
+{
+	unsigned long ldata;
+
+	if (unlikely(get_user(ldata, addr)))
+		return -EFAULT;
+
+	if (!(ldata & BIT(63)))
+		return -EINVAL;
+
+	*data = ldata & ~BIT(63);
+
+	return 0;
+}
+
+/*
+ * Verify the user shadow stack has a valid token on it, and then set
+ * *new_ssp according to the token.
+ */
+static int shstk_check_rstor_token(unsigned long *new_ssp)
+{
+	unsigned long token_addr;
+	unsigned long token;
+
+	token_addr = get_user_shstk_addr();
+	if (!token_addr)
+		return -EINVAL;
+
+	if (get_user(token, (unsigned long __user *)token_addr))
+		return -EFAULT;
+
+	/* Is mode flag correct? */
+	if (!(token & BIT(0)))
+		return -EINVAL;
+
+	/* Is busy flag set? */
+	if (token & BIT(1))
+		return -EINVAL;
+
+	/* Mask out flags */
+	token &= ~3UL;
+
+	/* Restore address aligned? */
+	if (!IS_ALIGNED(token, 8))
+		return -EINVAL;
+
+	/* Token placed properly? */
+	if (((ALIGN_DOWN(token, 8) - 8) != token_addr) || token >= TASK_SIZE_MAX)
+		return -EINVAL;
+
+	*new_ssp = token;
+
+	return 0;
+}
+
 void shstk_free(struct task_struct *tsk)
 {
 	struct thread_shstk *shstk = &tsk->thread.shstk;
-- 
2.17.1
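
For readers following the token and bit-63 encodings described in the
commit message, below is a minimal user-space sketch (not part of the
patch) of the layout that create_rstor_token() writes and
shstk_check_rstor_token() verifies. The helper names (encode_token,
token_valid) and the addresses used are hypothetical; the real writes
happen in the kernel via WRUSS, and the kernel additionally bounds the
token address against TASK_SIZE_MAX.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MODE_64BIT	(1ULL << 0)	/* bit 0: 64-bit mode flag */
#define BUSY		(1ULL << 1)	/* bit 1: busy flag, must be clear */
#define KERNEL_MARK	(1ULL << 63)	/* bit 63: kernel-written data frame */

/* The token is the ssp value it was created from, with the mode flag set. */
static uint64_t encode_token(uint64_t ssp)
{
	return ssp | MODE_64BIT;
}

/* Mirror of the checks in shstk_check_rstor_token() (minus the TASK_SIZE_MAX bound). */
static bool token_valid(uint64_t token, uint64_t token_addr)
{
	if (!(token & MODE_64BIT) || (token & BUSY))
		return false;

	token &= ~3ULL;				/* mask out flag bits */

	/* Aligned, and pointing directly above the token's own slot. */
	return (token & 7) == 0 && token - 8 == token_addr;
}

int main(void)
{
	uint64_t ssp = 0x7ffd1000;		/* hypothetical shadow stack pointer */
	uint64_t token_addr = ssp - 8;		/* token sits 8 bytes below ssp */
	uint64_t token = encode_token(ssp);
	uint64_t data = 0x401000;		/* hypothetical sigframe datum */

	printf("token @%#llx = %#llx valid=%d\n",
	       (unsigned long long)token_addr, (unsigned long long)token,
	       token_valid(token, token_addr));

	/* Kernel-written data frames get bit 63 set; user return addresses can't. */
	printf("marked data = %#llx\n", (unsigned long long)(data | KERNEL_MARK));

	return 0;
}

The mode-flag, busy-flag and placement checks mirror the branch structure
of shstk_check_rstor_token() above; the bit-63 marking mirrors
put_shstk_data(), which is why such a frame can be neither a userspace
return address nor a valid restore token.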