The running vCPU has little to do with the CPU context, so extract it to a
percpu variable. Unfortunately, the __hyp_running_vcpu field was also being
used by the nVHE speculative AT workaround to identify the CPU context of
the host, since it is non-NULL for the host and NULL for the guests. This
is handled by adding an explicit identification of the host context, which
also makes the workaround conditions much easier to comprehend.

Signed-off-by: Andrew Scull <ascull@xxxxxxxxxx>
---
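Illustrative note, not part of the patch: the shape of the change can be
seen in a self-contained C sketch. The kvm_* names mirror the patch, but
the percpu machinery is modelled as a plain per-CPU array, the types are
stubbed, and ctxt_is_host()/main() are hypothetical helpers added only so
the sketch compiles and runs on its own; it is not kernel code.

#include <assert.h>
#include <stdbool.h>

#define NR_CPUS 4

struct kvm_vcpu { int id; };

/* The running vCPU no longer lives in the CPU context... */
struct kvm_cpu_context {
	bool is_host;	/* ...and this flag replaces the __hyp_running_vcpu test */
};

/* Stand-in for DEFINE_PER_CPU(struct kvm_vcpu *, kvm_hyp_running_vcpu). */
static struct kvm_vcpu *kvm_hyp_running_vcpu[NR_CPUS];

/*
 * Hypothetical helper: host identification is now an explicit flag read
 * rather than inferring "host" from __hyp_running_vcpu being non-NULL.
 */
static bool ctxt_is_host(const struct kvm_cpu_context *ctxt)
{
	return ctxt->is_host;
}

int main(void)
{
	struct kvm_cpu_context host = { .is_host = true };
	struct kvm_cpu_context guest = { .is_host = false };
	struct kvm_vcpu vcpu = { .id = 0 };
	int cpu = 0;

	/* __kvm_vcpu_run() now publishes the running vCPU per CPU. */
	kvm_hyp_running_vcpu[cpu] = &vcpu;

	/* The speculative AT workaround conditions become direct reads. */
	assert(ctxt_is_host(&host));
	assert(!ctxt_is_host(&guest));
	assert(kvm_hyp_running_vcpu[cpu] == &vcpu);
	return 0;
}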
 arch/arm64/include/asm/kvm_asm.h           |  5 -----
 arch/arm64/include/asm/kvm_host.h          |  4 +++-
 arch/arm64/include/asm/kvm_hyp.h           |  2 ++
 arch/arm64/kernel/asm-offsets.c            |  1 -
 arch/arm64/kernel/image-vars.h             |  1 +
 arch/arm64/kvm/arm.c                       | 10 ++++++++++
 arch/arm64/kvm/hyp/hyp-entry.S             | 10 +++++-----
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h |  4 ++--
 arch/arm64/kvm/hyp/nvhe/switch.c           |  4 ++--
 arch/arm64/kvm/hyp/vhe/switch.c            |  5 ++---
 10 files changed, 27 insertions(+), 19 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..ebe9d582f360 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -188,11 +188,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
-.macro get_vcpu_ptr vcpu, ctxt
-	get_host_ctxt \ctxt, \vcpu
-	ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-.endm
-
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b06f24b5f443..67a760d08b6e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -260,7 +260,7 @@ struct kvm_cpu_context {
 		u32 copro[NR_COPRO_REGS];
 	};
 
-	struct kvm_vcpu *__hyp_running_vcpu;
+	bool is_host;
 };
 
 struct kvm_pmu_events {
@@ -578,6 +578,8 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
+	cpu_ctxt->is_host = true;
+
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
 	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 46689e7db46c..07745d9c49fc 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -12,6 +12,8 @@
 #include <asm/alternative.h>
 #include <asm/sysreg.h>
 
+DECLARE_PER_CPU(struct kvm_vcpu *, kvm_hyp_running_vcpu);
+
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 7d32fc959b1a..cbe6a0def777 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -108,7 +108,6 @@ int main(void)
   DEFINE(CPU_APDAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
   DEFINE(CPU_APDBKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
-  DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
   DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
 #endif
 #ifdef CONFIG_CPU_PM
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..dfe0f37567f3 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -71,6 +71,7 @@ KVM_NVHE_ALIAS(kvm_update_va_mask);
 /* Global kernel state accessed by nVHE hyp code. */
 KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
+KVM_NVHE_ALIAS(kvm_hyp_running_vcpu);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
 /* Kernel constant needed to compute idmap addresses. */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 98f05bdac3c1..94f4481e5637 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -47,6 +47,7 @@ __asm__(".arch_extension	virt");
 #endif
 
 DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+DEFINE_PER_CPU(struct kvm_vcpu *, kvm_hyp_running_vcpu);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
 /* The VMID used in the VTTBR */
@@ -1537,6 +1538,7 @@ static int init_hyp_mode(void)
 
 	for_each_possible_cpu(cpu) {
 		kvm_host_data_t *cpu_data;
+		struct kvm_vcpu **running_vcpu;
 
 		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
 		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
@@ -1545,6 +1547,14 @@
 			kvm_err("Cannot map host CPU state: %d\n", err);
 			goto out_err;
 		}
+
+		running_vcpu = per_cpu_ptr(&kvm_hyp_running_vcpu, cpu);
+		err = create_hyp_mappings(running_vcpu, running_vcpu + 1, PAGE_HYP);
+
+		if (err) {
+			kvm_err("Cannot map running vCPU: %d\n", err);
+			goto out_err;
+		}
 	}
 
 	err = hyp_map_aux_data();
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..e727bee8e110 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,7 +96,7 @@ el1_hvc_guest:
 alternative_cb	arm64_enable_wa2_handling
 	b	wa2_end
 alternative_cb_end
-	get_vcpu_ptr	x2, x0
+	hyp_ldr_this_cpu x2, kvm_hyp_running_vcpu, x0
 	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
 
 	// Sanitize the argument and update the guest flags
@@ -128,17 +128,17 @@ wa_epilogue:
 	sb
 
 el1_trap:
-	get_vcpu_ptr	x1, x0
+	hyp_ldr_this_cpu x1, kvm_hyp_running_vcpu, x0
 	mov	x0, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
 el1_irq:
-	get_vcpu_ptr	x1, x0
+	hyp_ldr_this_cpu x1, kvm_hyp_running_vcpu, x0
 	mov	x0, #ARM_EXCEPTION_IRQ
 	b	__guest_exit
 
 el1_error:
-	get_vcpu_ptr	x1, x0
+	hyp_ldr_this_cpu x1, kvm_hyp_running_vcpu, x0
 	mov	x0, #ARM_EXCEPTION_EL1_SERROR
 	b	__guest_exit
 
@@ -151,7 +151,7 @@ el2_sync:
 	b.eq	__hyp_panic
 
 	/* Let's attempt a recovery from the illegal exception return */
-	get_vcpu_ptr	x1, x0
+	hyp_ldr_this_cpu x1, kvm_hyp_running_vcpu, x0
 	mov	x0, #ARM_EXCEPTION_IL
 	b	__guest_exit
 
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 7a986030145f..c55b2d17ada8 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -80,7 +80,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1),	SYS_SCTLR);
 		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1),	SYS_TCR);
-	} else	if (!ctxt->__hyp_running_vcpu) {
+	} else	if (!ctxt->is_host) {
 		/*
 		 * Must only be done for guest registers, hence the context
 		 * test. We're coming from the host, so SCTLR.M is already
@@ -109,7 +109,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 
 	if (!has_vhe() &&
 	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
-	    ctxt->__hyp_running_vcpu) {
+	    ctxt->is_host) {
 		/*
 		 * Must only be done for host registers, hence the context
 		 * test. Pairs with nVHE's __deactivate_traps().
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..99cf90200bf7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -176,7 +176,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu = kern_hyp_va(vcpu);
 
 	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
-	host_ctxt->__hyp_running_vcpu = vcpu;
+	*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
 	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
@@ -247,7 +247,7 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	u64 spsr = read_sysreg_el2(SYS_SPSR);
 	u64 elr = read_sysreg_el2(SYS_ELR);
 	u64 par = read_sysreg(par_el1);
-	struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
+	struct kvm_vcpu *vcpu = __hyp_this_cpu_read(kvm_hyp_running_vcpu);
 	unsigned long str_va;
 
 	if (read_sysreg(vttbr_el2)) {
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..366dc386224c 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -109,7 +109,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	u64 exit_code;
 
 	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
-	host_ctxt->__hyp_running_vcpu = vcpu;
+	*__hyp_this_cpu_ptr(kvm_hyp_running_vcpu) = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
 	sysreg_save_host_state_vhe(host_ctxt);
@@ -195,8 +195,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
 			     struct kvm_cpu_context *host_ctxt)
 {
-	struct kvm_vcpu *vcpu;
-	vcpu = host_ctxt->__hyp_running_vcpu;
+	struct kvm_vcpu *vcpu = __hyp_this_cpu_read(kvm_hyp_running_vcpu);
 
 	__deactivate_traps(vcpu);
 	sysreg_restore_host_state_vhe(host_ctxt);
-- 
2.27.0.389.gc38d7665816-goog