Hi Kalesh,

On Thu, Jul 21, 2022 at 6:58 AM Kalesh Singh <kaleshsingh@xxxxxxxxxx> wrote:
>
> Implements the common framework necessary for unwind() to work in
> the protected nVHE context:
>  - on_accessible_stack()
>  - on_overflow_stack()
>  - unwind_next()
>
> Protected nVHE unwind() is used to unwind and save the hyp stack
> addresses to the shared stacktrace buffer. The host reads the
> entries in this buffer, symbolizes and dumps the stacktrace (later
> patch in the series).
>
> Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
> ---

Reviewed-by: Fuad Tabba <tabba@xxxxxxxxxx>

Cheers,
/fuad

>  arch/arm64/include/asm/stacktrace/common.h |  2 ++
>  arch/arm64/include/asm/stacktrace/nvhe.h   | 34 ++++++++++++++++++++--
>  2 files changed, 34 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
> index be7920ba70b0..73fd9e143c4a 100644
> --- a/arch/arm64/include/asm/stacktrace/common.h
> +++ b/arch/arm64/include/asm/stacktrace/common.h
> @@ -34,6 +34,7 @@ enum stack_type {
>  	STACK_TYPE_OVERFLOW,
>  	STACK_TYPE_SDEI_NORMAL,
>  	STACK_TYPE_SDEI_CRITICAL,
> +	STACK_TYPE_HYP,
>  	__NR_STACK_TYPES
>  };
>
> @@ -186,6 +187,7 @@ static inline int unwind_next_common(struct unwind_state *state,
>  	 *
>  	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
>  	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
> +	 * HYP -> OVERFLOW
>  	 *
>  	 * ... but the nesting itself is strict. Once we transition from one
>  	 * stack to another, it's never valid to unwind back to that first
> diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
> index 8f02803a005f..c3688e717136 100644
> --- a/arch/arm64/include/asm/stacktrace/nvhe.h
> +++ b/arch/arm64/include/asm/stacktrace/nvhe.h
> @@ -39,10 +39,19 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
>  	state->pc = pc;
>  }
>
> +static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
> +				struct stack_info *info);
> +
>  static inline bool on_accessible_stack(const struct task_struct *tsk,
>  				       unsigned long sp, unsigned long size,
>  				       struct stack_info *info)
>  {
> +	if (on_accessible_stack_common(tsk, sp, size, info))
> +		return true;
> +
> +	if (on_hyp_stack(sp, size, info))
> +		return true;
> +
>  	return false;
>  }
>
> @@ -60,12 +69,27 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
>  static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
>  				     struct stack_info *info)
>  {
> -	return false;
> +	unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
> +	unsigned long high = low + OVERFLOW_STACK_SIZE;
> +
> +	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
> +}
> +
> +static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
> +				struct stack_info *info)
> +{
> +	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +	unsigned long high = params->stack_hyp_va;
> +	unsigned long low = high - PAGE_SIZE;
> +
> +	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
>  }
>
>  static inline int notrace unwind_next(struct unwind_state *state)
>  {
> -	return 0;
> +	struct stack_info info;
> +
> +	return unwind_next_common(state, &info, NULL);
>  }
>  NOKPROBE_SYMBOL(unwind_next);
>  #else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
> @@ -75,6 +99,12 @@ static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
>  	return false;
>  }
>
> +static inline bool on_hyp_stack(unsigned long sp, unsigned long size,
> +				struct stack_info *info)
> +{
> +	return false;
> +}
> +
>  static inline int notrace unwind_next(struct unwind_state *state)
>  {
>  	return 0;
> --
> 2.37.0.170.g444d1eabd0-goog
>

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm