The nVHE hypervisor can use this shared area to dump its stacktrace
addresses on hyp_panic(). Symbolization and printing the stacktrace can
then be handled by the host in EL1 (done in a later patch in this
series).

Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_asm.h |  1 +
 arch/arm64/kvm/arm.c             | 34 ++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/setup.c  | 11 +++++++++++
 3 files changed, 46 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 2e277f2ed671..ad31ac68264f 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -174,6 +174,7 @@ struct kvm_nvhe_init_params {
 	unsigned long hcr_el2;
 	unsigned long vttbr;
 	unsigned long vtcr;
+	unsigned long stacktrace_hyp_va;
 };
 
 /* Translate a kernel address @ptr into its equivalent linear mapping */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d300def44f5c..26005182da20 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -50,6 +50,7 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stacktrace_page);
 unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
@@ -1484,6 +1485,7 @@ static void cpu_prepare_hyp_mode(int cpu)
 	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
 	params->tcr_el2 = tcr;
 
+	params->stacktrace_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stacktrace_page, cpu));
 	params->pgd_pa = kvm_mmu_get_httbr();
 	if (is_protected_kvm_enabled())
 		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
@@ -1777,6 +1779,7 @@ static void teardown_hyp_mode(void)
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_page(per_cpu(kvm_arm_hyp_stacktrace_page, cpu));
 		free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
 	}
 }
@@ -1868,6 +1871,23 @@ static int init_hyp_mode(void)
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
 	}
 
+	/*
+	 * Allocate stacktrace pages for Hypervisor-mode.
+	 * This is used by the hypervisor to share its stacktrace
+	 * with the host on a hyp_panic().
+	 */
+	for_each_possible_cpu(cpu) {
+		unsigned long stacktrace_page;
+
+		stacktrace_page = __get_free_page(GFP_KERNEL);
+		if (!stacktrace_page) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		per_cpu(kvm_arm_hyp_stacktrace_page, cpu) = stacktrace_page;
+	}
+
 	/*
 	 * Allocate and initialize pages for Hypervisor-mode percpu regions.
 	 */
@@ -1975,6 +1995,20 @@ static int init_hyp_mode(void)
 		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
 	}
 
+	/*
+	 * Map the hyp stacktrace pages.
+	 */
+	for_each_possible_cpu(cpu) {
+		char *stacktrace_page = (char *)per_cpu(kvm_arm_hyp_stacktrace_page, cpu);
+
+		err = create_hyp_mappings(stacktrace_page, stacktrace_page + PAGE_SIZE,
+					  PAGE_HYP);
+		if (err) {
+			kvm_err("Cannot map hyp stacktrace page\n");
+			goto out_err;
+		}
+	}
+
 	for_each_possible_cpu(cpu) {
 		char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
 		char *percpu_end = percpu_begin + nvhe_percpu_size();
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index e8d4ea2fcfa0..9b81bf2d40d7 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -135,6 +135,17 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
 
 		/* Update stack_hyp_va to end of the stack's private VA range */
 		params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
+
+		/*
+		 * Map the stacktrace pages as shared and transfer ownership to
+		 * the hypervisor.
+		 */
+		prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
+		start = (void *)params->stacktrace_hyp_va;
+		end = start + PAGE_SIZE;
+		ret = pkvm_create_mappings(start, end, prot);
+		if (ret)
+			return ret;
 	}
 
 	/*
-- 
2.36.0.464.gb9c8b46e94-goog
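
For context, below is a rough sketch of how the host side might consume
the shared page when the hypervisor panics. This is illustrative only:
the actual consumer lands in a later patch in this series, and the
helper name hyp_dump_backtrace(), the zero-terminated entry layout, and
the direct use of the host VA of the per-CPU page are all assumptions
made for the example (in particular, real code must account for the
hyp/kernel VA translation before symbolizing).

/*
 * Illustrative sketch only -- not part of this patch. Assumes hyp
 * wrote raw return addresses from the base of the page, terminated
 * by a zero entry, and that they have already been converted back
 * to host kernel VAs suitable for symbolization.
 */
static void hyp_dump_backtrace(int cpu)
{
	unsigned long *entries;
	int i;

	/* Host (kernel) VA of this CPU's shared stacktrace page */
	entries = (unsigned long *)per_cpu(kvm_arm_hyp_stacktrace_page, cpu);

	kvm_err("nVHE hyp backtrace:\n");
	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
		if (!entries[i])	/* assumed terminator */
			break;
		/* %pB symbolizes a backtrace address via kallsyms */
		kvm_err(" [<%016lx>] %pB\n", entries[i], (void *)entries[i]);
	}
}

On the hypervisor side, hyp_panic() would correspondingly unwind its own
stack and store the unwound PCs through params->stacktrace_hyp_va before
control returns to the host.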