Re: [PATCH -next V10 08/10] riscv: stack: Support HAVE_IRQ_EXIT_ON_IRQ_STACK

guoren@xxxxxxxxxx writes:

> From: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
>
> Add independent irq stacks for percpu to prevent kernel stack overflows.
> It is also compatible with VMAP_STACK by implementing
> arch_alloc_vmap_stack.  Many architectures have supported
> HAVE_IRQ_EXIT_ON_IRQ_STACK, riscv should follow up.
>

I would still prefer to see this as a separate series, with the generic
entry series ending at the previous patch. There are already a lot of
moving pieces in this series; now stack changes are added on top. Is
this really required for generic entry support?

Some comments below.

> Tested-by: Jisheng Zhang <jszhang@xxxxxxxxxx>
> Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
> Signed-off-by: Guo Ren <guoren@xxxxxxxxxx>
> ---
>  arch/riscv/Kconfig                   |  8 ++++
>  arch/riscv/include/asm/thread_info.h |  2 +
>  arch/riscv/include/asm/vmap_stack.h  | 28 ++++++++++++
>  arch/riscv/kernel/irq.c              | 66 +++++++++++++++++++++++++++-
>  4 files changed, 102 insertions(+), 2 deletions(-)
>  create mode 100644 arch/riscv/include/asm/vmap_stack.h
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 518e8523d41d..0a9d4bdc0338 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -446,6 +446,14 @@ config FPU
>  
>  	  If you don't know what to do here, say Y.
>  
> +config IRQ_STACKS
> +	bool "Independent irq stacks" if EXPERT
> +	default y
> +	select HAVE_IRQ_EXIT_ON_IRQ_STACK
> +	help
> +	  Add independent irq stacks for percpu to prevent kernel stack overflows.
> +	  We may save some memory footprint by disabling IRQ_STACKS.
> +

Other archs use CONFIG_IRQSTACKS. Let's use that name for riscv as
well, and reuse the same Kconfig wording as the other archs.
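Something along these lines (wording loosely adapted from parisc's
CONFIG_IRQSTACKS help text, so treat it as a sketch rather than the
exact text to copy):

	config IRQSTACKS
		bool "Use separate kernel stacks when processing interrupts" if EXPERT
		default y
		select HAVE_IRQ_EXIT_ON_IRQ_STACK
		help
		  If you say Y here the kernel will use separate kernel
		  stacks for handling hard interrupts. This can help
		  avoid overflowing the process kernel stacks.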

>  endmenu # "Platform type"
>  
>  menu "Kernel features"
> diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
> index 7de4fb96f0b5..043da8ccc7e6 100644
> --- a/arch/riscv/include/asm/thread_info.h
> +++ b/arch/riscv/include/asm/thread_info.h
> @@ -40,6 +40,8 @@
>  #define OVERFLOW_STACK_SIZE     SZ_4K
>  #define SHADOW_OVERFLOW_STACK_SIZE (1024)
>  
> +#define IRQ_STACK_SIZE		THREAD_SIZE
> +
>  #ifndef __ASSEMBLY__
>  
>  extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
> diff --git a/arch/riscv/include/asm/vmap_stack.h b/arch/riscv/include/asm/vmap_stack.h
> new file mode 100644
> index 000000000000..3fbf481abf4f
> --- /dev/null
> +++ b/arch/riscv/include/asm/vmap_stack.h
> @@ -0,0 +1,28 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +// Copied from arch/arm64/include/asm/vmap_stack.h.
> +#ifndef _ASM_RISCV_VMAP_STACK_H
> +#define _ASM_RISCV_VMAP_STACK_H
> +
> +#include <linux/bug.h>
> +#include <linux/gfp.h>
> +#include <linux/kconfig.h>
> +#include <linux/vmalloc.h>
> +#include <linux/pgtable.h>
> +#include <asm/thread_info.h>
> +
> +/*
> + * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
> + * stacks need to have the same alignment.
> + */
> +static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
> +{
> +	void *p;
> +
> +	BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
> +
> +	p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
> +			__builtin_return_address(0));
> +	return kasan_reset_tag(p);
> +}
> +
> +#endif /* _ASM_RISCV_VMAP_STACK_H */
> diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
> index 24c2e1bd756a..5d77f692b198 100644
> --- a/arch/riscv/kernel/irq.c
> +++ b/arch/riscv/kernel/irq.c
> @@ -10,6 +10,37 @@
>  #include <linux/irqchip.h>
>  #include <linux/seq_file.h>
>  #include <asm/smp.h>
> +#include <asm/vmap_stack.h>
> +
> +#ifdef CONFIG_IRQ_STACKS
> +static DEFINE_PER_CPU(ulong *, irq_stack_ptr);
> +
> +#ifdef CONFIG_VMAP_STACK
> +static void init_irq_stacks(void)
> +{
> +	int cpu;
> +	ulong *p;
> +
> +	for_each_possible_cpu(cpu) {
> +		p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
> +		per_cpu(irq_stack_ptr, cpu) = p;
> +	}
> +}
> +#else
> +/* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */
> +DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack);
> +
> +static void init_irq_stacks(void)
> +{
> +	int cpu;
> +
> +	for_each_possible_cpu(cpu)
> +		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
> +}
> +#endif /* CONFIG_VMAP_STACK */
> +#else
> +static void init_irq_stacks(void) {}
> +#endif /* CONFIG_IRQ_STACKS */
>  
>  int arch_show_interrupts(struct seq_file *p, int prec)
>  {
> @@ -19,21 +50,52 @@ int arch_show_interrupts(struct seq_file *p, int prec)
>  
>  void __init init_IRQ(void)
>  {
> +	init_irq_stacks();
>  	irqchip_init();
>  	if (!handle_arch_irq)
>  		panic("No interrupt controller found.");
>  }
>  
> -asmlinkage void noinstr do_riscv_irq(struct pt_regs *regs)
> +static void noinstr handle_riscv_irq(struct pt_regs *regs)
>  {
>  	struct pt_regs *old_regs;
> -	irqentry_state_t state = irqentry_enter(regs);
>  
>  	irq_enter_rcu();
>  	old_regs = set_irq_regs(regs);
>  	handle_arch_irq(regs);
>  	set_irq_regs(old_regs);
>  	irq_exit_rcu();
> +}
> +
> +asmlinkage void noinstr do_riscv_irq(struct pt_regs *regs)
> +{
> +	irqentry_state_t state = irqentry_enter(regs);
> +#ifdef CONFIG_IRQ_STACKS
> +	if (on_thread_stack()) {
> +		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
> +					+ IRQ_STACK_SIZE/sizeof(ulong);
> +		__asm__ __volatile(
> +		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> +		REG_S"  ra, (sp)		\n"
> +		"addi	sp, sp, -"RISCV_SZPTR  "\n"
> +		REG_S"  s0, (sp)		\n"
> +		"addi	s0, sp, 2*"RISCV_SZPTR "\n"
> +		"move	sp, %[sp]		\n"
> +		"move	a0, %[regs]		\n"
> +		"call	handle_riscv_irq	\n"
> +		"addi	sp, s0, -2*"RISCV_SZPTR"\n"
> +		REG_L"  s0, (sp)		\n"
> +		"addi	sp, sp, "RISCV_SZPTR   "\n"
> +		REG_L"  ra, (sp)		\n"
> +		"addi	sp, sp, "RISCV_SZPTR   "\n"
> +		:
> +		: [sp] "r" (sp), [regs] "r" (regs)
> +		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
> +		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
> +		  "memory");

This whole assembly blob will be copy-pasted in later commits. Can we
please do something like x86 does here (call_on_stack in
arch/x86/include/asm/irq_stack.h)? That would hurt our eyes a bit less
and be more maintainable.
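E.g. something like the below -- completely untested sketch, names made
up, using the REG_S/REG_L/RISCV_SZPTR helpers from asm/asm.h -- so that
later patches can reuse the stack switch instead of duplicating the asm:

	/*
	 * Untested sketch, not the x86 macro, just the same idea: hide
	 * the stack switch behind one helper so callers only pass the
	 * function and its pt_regs argument.
	 */
	#define call_on_irq_stack(regs, func)					\
	do {									\
		ulong *irq_sp = per_cpu(irq_stack_ptr, smp_processor_id())	\
					+ IRQ_STACK_SIZE / sizeof(ulong);	\
										\
		/* Save ra/s0, set up a frame, switch sp, call func(regs). */	\
		__asm__ __volatile__ (						\
		"addi	sp, sp, -2*"RISCV_SZPTR"	\n"			\
		REG_S"	ra, "RISCV_SZPTR"(sp)		\n"			\
		REG_S"	s0, (sp)			\n"			\
		"addi	s0, sp, 2*"RISCV_SZPTR"		\n"			\
		"move	sp, %[stk]			\n"			\
		"move	a0, %[arg]			\n"			\
		"call	" #func "			\n"			\
		/* Switch back to the task stack, restore ra/s0. */		\
		"addi	sp, s0, -2*"RISCV_SZPTR"	\n"			\
		REG_L"	s0, (sp)			\n"			\
		REG_L"	ra, "RISCV_SZPTR"(sp)		\n"			\
		"addi	sp, sp, 2*"RISCV_SZPTR"		\n"			\
		:								\
		: [stk] "r" (irq_sp), [arg] "r" (regs)				\
		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",		\
		  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "memory");		\
	} while (0)

do_riscv_irq() would then shrink to something like:

	if (on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);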


Björn
