On Fri, Jul 10, 2020 at 04:19:58PM +0000, guoren@xxxxxxxxxx wrote:
> From: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
> 
> This enables the use of per-task stack canary values if GCC has
> support for emitting the stack canary reference relative to the
> value of tp, which holds the task struct pointer in the riscv
> kernel.
> 
> After comparing the arm64 and x86 implementations, arm64's seems
> more flexible and readable. The key point is how gcc gets the
> offset of stack_canary from gs/sp_el0.
> 
> x86: Uses a fixed offset from gs, which is not flexible.
> 
> struct fixed_percpu_data {
> 	/*
> 	 * GCC hardcodes the stack canary as %gs:40.  Since the
> 	 * irq_stack is the object at %gs:0, we reserve the bottom
> 	 * 48 bytes of the irq stack for the canary.
> 	 */
> 	char		gs_base[40]; // :(
> 	unsigned long	stack_canary;
> };
> 
> arm64: Uses -mstack-protector-guard-offset & guard-reg
> 	gcc options:
> 	-mstack-protector-guard=sysreg
> 	-mstack-protector-guard-reg=sp_el0
> 	-mstack-protector-guard-offset=xxx
> 
> riscv: Uses -mstack-protector-guard-offset & guard-reg
> 	gcc options:
> 	-mstack-protector-guard=tls
> 	-mstack-protector-guard-reg=tp
> 	-mstack-protector-guard-offset=xxx
> 
> Here is the riscv gcc work [1].
> 
> [1] https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549583.html
> 
> In the end, gcc inserts the following code before the return:
> 
> *  0xffffffe00020b396 <+120>:	ld	a5,1008(tp) # 0x3f0
> *  0xffffffe00020b39a <+124>:	xor	a5,a5,a4
> *  0xffffffe00020b39c <+126>:	mv	a0,s5
> *  0xffffffe00020b39e <+128>:	bnez	a5,0xffffffe00020b61c <_do_fork+766>
>    0xffffffe00020b3a2 <+132>:	ld	ra,136(sp)
>    0xffffffe00020b3a4 <+134>:	ld	s0,128(sp)
>    0xffffffe00020b3a6 <+136>:	ld	s1,120(sp)
>    0xffffffe00020b3a8 <+138>:	ld	s2,112(sp)
>    0xffffffe00020b3aa <+140>:	ld	s3,104(sp)
>    0xffffffe00020b3ac <+142>:	ld	s4,96(sp)
>    0xffffffe00020b3ae <+144>:	ld	s5,88(sp)
>    0xffffffe00020b3b0 <+146>:	ld	s6,80(sp)
>    0xffffffe00020b3b2 <+148>:	ld	s7,72(sp)
>    0xffffffe00020b3b4 <+150>:	addi	sp,sp,144
>    0xffffffe00020b3b6 <+152>:	ret
>    ...
> *  0xffffffe00020b61c <+766>:	auipc	ra,0x7f8
> *  0xffffffe00020b620 <+770>:	jalr	-1764(ra) # 0xffffffe000a02f38 <__stack_chk_fail>
> 
> Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
> Signed-off-by: cooper <cooper.qu@xxxxxxxxxxxxxxxxx>
> Cc: cooper <cooper.qu@xxxxxxxxxxxxxxxxx>
> Cc: Kees Cook <keescook@xxxxxxxxxxxx>
> ---
> Changes in v2:
>  - Change to -mstack-protector-guard=tls to match gcc's final
>    definition of the option
>  - Solve a compile error by changing the position of KBUILD_CFLAGS in
>    the Makefile
> 
> Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
> ---
>  arch/riscv/Kconfig                      |  7 +++++++
>  arch/riscv/Makefile                     | 10 ++++++++++
>  arch/riscv/include/asm/stackprotector.h |  3 ++-
>  arch/riscv/kernel/asm-offsets.c         |  3 +++
>  arch/riscv/kernel/process.c             |  2 +-
>  5 files changed, 23 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 4b0e308..d98ce29 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -394,6 +394,13 @@ config CMDLINE_FORCE
>  
>  endchoice
>  
> +config CC_HAVE_STACKPROTECTOR_TLS
> +	def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0)
> +
> +config STACKPROTECTOR_PER_TASK
> +	def_bool y
> +	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
> +
>  endmenu
>  
>  config BUILTIN_DTB
> diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
> index fb6e37d..f5f8ee9 100644
> --- a/arch/riscv/Makefile
> +++ b/arch/riscv/Makefile
> @@ -68,6 +68,16 @@ KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
>  # architectures.  It's faster to have GCC emit only aligned accesses.
>  KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
>  
> +ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
> +prepare: stack_protector_prepare
> +stack_protector_prepare: prepare0
> +	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls	\
> +				-mstack-protector-guard-reg=tp	\
> +				-mstack-protector-guard-offset=$(shell	\
> +		awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}'	\
> +				include/generated/asm-offsets.h))
> +endif
> +
>  # arch specific predefines for sparse
>  CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
>  
> diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
> index d95f7b2..a895e07 100644
> --- a/arch/riscv/include/asm/stackprotector.h
> +++ b/arch/riscv/include/asm/stackprotector.h
> @@ -28,6 +28,7 @@ static __always_inline void boot_init_stack_canary(void)
>  	canary &= CANARY_MASK;
>  
>  	current->stack_canary = canary;
> -	__stack_chk_guard = current->stack_canary;
> +	if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
> +		__stack_chk_guard = current->stack_canary;
>  }
>  #endif /* _ASM_RISCV_STACKPROTECTOR_H */
> diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
> index 07cb9c1..999b465 100644
> --- a/arch/riscv/kernel/asm-offsets.c
> +++ b/arch/riscv/kernel/asm-offsets.c
> @@ -29,6 +29,9 @@ void asm_offsets(void)
>  	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
>  	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
>  	OFFSET(TASK_STACK, task_struct, stack);
> +#ifdef CONFIG_STACKPROTECTOR

Should this be CONFIG_STACKPROTECTOR_PER_TASK?

> +	OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
> +#endif
>  	OFFSET(TASK_TI, task_struct, thread_info);
>  	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
>  	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
> diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
> index 6548929..cb4ac65 100644
> --- a/arch/riscv/kernel/process.c
> +++ b/arch/riscv/kernel/process.c
> @@ -24,7 +24,7 @@
>  
>  register unsigned long gp_in_global __asm__("gp");
>  
> -#ifdef CONFIG_STACKPROTECTOR
> +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
>  #include <linux/stackprotector.h>
>  unsigned long __stack_chk_guard __read_mostly;
>  EXPORT_SYMBOL(__stack_chk_guard);
> -- 
> 2.7.4
> 

-- 
Kees Cook
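
For readers trying to follow what these flags buy, here is a rough
userspace sketch of the check sequence GCC emits under this scheme: the
canary is read tp-relative at the TSK_STACK_CANARY offset in the
prologue and re-checked before return, with a mismatch diverting to
__stack_chk_fail. Everything below (struct demo_task, demo_func, the
1008-byte pad, the demo_* names) is invented for illustration and is
not the kernel's actual task_struct layout or code.

/*
 * Illustrative sketch only -- NOT kernel code.  It just mimics the
 * "ld a5,1008(tp)" / "bnez ... __stack_chk_fail" sequence shown in
 * the disassembly above.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_task {
	char pad[1008];			/* stand-in for the fields before stack_canary */
	unsigned long stack_canary;	/* what "ld a5,1008(tp)" reads, tp-relative */
};

/* On riscv the kernel keeps the task pointer in the tp register; model it. */
static struct demo_task *tp;

static void demo_stack_chk_fail(void)
{
	fprintf(stderr, "stack smashing detected\n");
	abort();
}

static void demo_func(void)
{
	/* Prologue: copy the per-task canary into the frame. */
	unsigned long canary = tp->stack_canary;

	/* ... function body that might overflow a local buffer ... */

	/* Epilogue: compare the saved copy against the per-task guard. */
	if (canary != tp->stack_canary)
		demo_stack_chk_fail();
}

int main(void)
{
	static struct demo_task task = { .stack_canary = 0xdeadbeefUL };

	tp = &task;
	demo_func();
	printf("canary check passed\n");
	return 0;
}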