Allocate and switch to a 16-byte aligned secondary stack on overflow. This
gives us stack space to handle the overflow more gracefully, and is used by
a subsequent patch to dump the hypervisor stacktrace. The overflow stack is
only allocated if CONFIG_NVHE_EL2_DEBUG is enabled, since the hypervisor
stacktrace is a debug feature that depends on CONFIG_NVHE_EL2_DEBUG.

Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---

Changes in v4:
  - Update comment to clarify that resetting the SP to the top of the
    stack only happens if CONFIG_NVHE_EL2_DEBUG is disabled, per Fuad

 arch/arm64/kvm/hyp/nvhe/host.S   | 11 ++++++++---
 arch/arm64/kvm/hyp/nvhe/switch.c |  5 +++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index be6d844279b1..a0c4b4f1549f 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -179,13 +179,18 @@ SYM_FUNC_END(__host_hvc)
 	b	hyp_panic

 .L__hyp_sp_overflow\@:
+#ifdef CONFIG_NVHE_EL2_DEBUG
+	/* Switch to the overflow stack */
+	adr_this_cpu sp, hyp_overflow_stack + PAGE_SIZE, x0
+#else
 	/*
-	 * Reset SP to the top of the stack, to allow handling the hyp_panic.
-	 * This corrupts the stack but is ok, since we won't be attempting
-	 * any unwinding here.
+	 * If !CONFIG_NVHE_EL2_DEBUG, reset SP to the top of the stack, to
+	 * allow handling the hyp_panic. This corrupts the stack but is ok,
+	 * since we won't be attempting any unwinding here.
 	 */
 	ldr_this_cpu x0, kvm_init_params + NVHE_INIT_STACK_HYP_VA, x1
 	mov	sp, x0
+#endif

 	bl	hyp_panic_bad_stack
 	ASM_BUG()
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 703a5d3f611b..efc20273a352 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -34,6 +34,11 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

+#ifdef CONFIG_NVHE_EL2_DEBUG
+DEFINE_PER_CPU(unsigned long [PAGE_SIZE/sizeof(long)], hyp_overflow_stack)
+	__aligned(16);
+#endif
+
 static void __activate_traps(struct kvm_vcpu *vcpu)
 {
 	u64 val;
-- 
2.35.1.616.g0bdcbb4464-goog
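
As a rough illustration of what the switch.c hunk sets up, below is a
standalone C sketch of a per-CPU, page-sized, 16-byte aligned overflow
stack whose high end is the value loaded into SP (the stack grows
downward, mirroring "adr_this_cpu sp, hyp_overflow_stack + PAGE_SIZE, x0"
above). This is not kernel code: PAGE_SIZE_SKETCH, NR_CPUS_SKETCH and
overflow_stack_top() are made-up names for the example only.

/*
 * Standalone sketch (not the kernel code above): models a per-CPU,
 * page-sized, 16-byte aligned overflow stack. The stack grows downward
 * on arm64, so the value handed to SP is base + size, the same idea as
 * "adr_this_cpu sp, hyp_overflow_stack + PAGE_SIZE, x0" in the patch.
 * PAGE_SIZE_SKETCH, NR_CPUS_SKETCH and overflow_stack_top() are
 * illustrative names, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096
#define NR_CPUS_SKETCH		4

/* One page of overflow stack per CPU, 16-byte aligned as AAPCS64 requires of SP. */
static unsigned long overflow_stack[NR_CPUS_SKETCH][PAGE_SIZE_SKETCH / sizeof(long)]
	__attribute__((aligned(16)));

/* Address to load into SP on overflow: the high end of this CPU's page. */
static uintptr_t overflow_stack_top(int cpu)
{
	return (uintptr_t)overflow_stack[cpu] + PAGE_SIZE_SKETCH;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
		printf("cpu%d overflow stack top: %#lx\n",
		       cpu, (unsigned long)overflow_stack_top(cpu));
	return 0;
}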