On Tue, Sep 20, 2022, at 2:46 AM, Guo Ren wrote: > > How about this one: (only THREAD_SIZE, no THREAD_ORDER&SHIFT.) > > - > /* thread information allocation */ > -#ifdef CONFIG_64BIT > -#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) > -#else > -#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) > -#endif > -#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) > +#define THREAD_SIZE CONFIG_THREAD_SIZE So far looks fine. > > /* > * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by > - * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry > - * assembly. > + * checking sp & THREAD_SIZE, which we can do cheaply in the entry assembly. > */ > #ifdef CONFIG_VMAP_STACK > #define THREAD_ALIGN (2 * THREAD_SIZE) > @@ -36,7 +24,6 @@ > #define THREAD_ALIGN THREAD_SIZE > #endif The THREAD_ALIGN does not: this only works for power-of-two values of THREAD_SIZE. > diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S > index 426529b84db0..1e35fb3bdae5 100644 > --- a/arch/riscv/kernel/entry.S > +++ b/arch/riscv/kernel/entry.S > @@ -29,8 +29,8 @@ _restore_kernel_tpsp: > > #ifdef CONFIG_VMAP_STACK > addi sp, sp, -(PT_SIZE_ON_STACK) > - srli sp, sp, THREAD_SHIFT > - andi sp, sp, 0x1 > + srli sp, sp, PAGE_SHIFT > + andi sp, sp, (THREAD_SIZE >> PAGE_SHIFT) I think this needs to use THREAD_ALIGN, not THREAD_SIZE. Arnd