Transition code to use the new hyp_running pointers. Everything is consistent, because all fields are in-sync. Remove __hyp_running_vcpu now that no one is using it. Signed-off-by: Fuad Tabba <tabba@xxxxxxxxxx> --- arch/arm64/include/asm/kvm_asm.h | 24 ++++-------------------- arch/arm64/include/asm/kvm_host.h | 5 +---- arch/arm64/kernel/asm-offsets.c | 1 - arch/arm64/kvm/handle_exit.c | 6 +++--- arch/arm64/kvm/hyp/nvhe/host.S | 2 +- arch/arm64/kvm/hyp/nvhe/switch.c | 4 +--- arch/arm64/kvm/hyp/vhe/switch.c | 8 ++++---- 7 files changed, 14 insertions(+), 36 deletions(-) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 52079e937fcd..e24ebcf9e0d3 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -246,31 +246,18 @@ extern u32 __kvm_get_mdcr_el2(void); add \reg, \reg, #HOST_DATA_CONTEXT .endm -.macro get_vcpu_ptr vcpu, ctxt - get_host_ctxt \ctxt, \vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] -.endm - .macro get_vcpu_ctxt_ptr vcpu, ctxt get_host_ctxt \ctxt, \vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] - add \vcpu, \vcpu, #VCPU_CONTEXT + ldr \vcpu, [\ctxt, #HOST_CONTEXT_CTXT] .endm .macro get_vcpu_hyps_ptr vcpu, ctxt get_host_ctxt \ctxt, \vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] - add \vcpu, \vcpu, #VCPU_HYPS -.endm - -.macro get_loaded_vcpu vcpu, ctxt - adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] + ldr \vcpu, [\ctxt, #HOST_CONTEXT_HYPS] .endm .macro set_loaded_vcpu vcpu, ctxt, tmp adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp - str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] add \tmp, \vcpu, #VCPU_CONTEXT str \tmp, [\ctxt, #HOST_CONTEXT_CTXT] @@ -281,21 +268,18 @@ extern u32 __kvm_get_mdcr_el2(void); .macro clear_loaded_vcpu ctxt, tmp adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp - str xzr, [\ctxt, #HOST_CONTEXT_VCPU] str xzr, [\ctxt, #HOST_CONTEXT_CTXT] str xzr, [\ctxt, #HOST_CONTEXT_HYPS] .endm .macro get_loaded_vcpu_ctxt vcpu, ctxt adr_this_cpu \ctxt, kvm_hyp_ctxt, 
\vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] - add \vcpu, \vcpu, #VCPU_CONTEXT + ldr \vcpu, [\ctxt, #HOST_CONTEXT_CTXT] .endm .macro get_loaded_vcpu_hyps vcpu, ctxt adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu - ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] - add \vcpu, \vcpu, #VCPU_HYPS + ldr \vcpu, [\ctxt, #HOST_CONTEXT_HYPS] .endm /* diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b42d0c6c8004..035ca5a49166 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -227,15 +227,12 @@ struct kvm_cpu_context { u64 sys_regs[NR_SYS_REGS]; - struct kvm_vcpu *__hyp_running_vcpu; struct kvm_cpu_context *__hyp_running_ctxt; struct vcpu_hyp_state *__hyp_running_hyps; }; -#define get_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu #define set_hyp_running_vcpu(host_ctxt, vcpu) do { \ struct kvm_vcpu *v = (vcpu); \ - (host_ctxt)->__hyp_running_vcpu = v; \ if (vcpu) { \ (host_ctxt)->__hyp_running_ctxt = &v->arch.ctxt; \ (host_ctxt)->__hyp_running_hyps = &v->arch.hyp_state; \ @@ -245,7 +242,7 @@ struct kvm_cpu_context { }\ } while(0) -#define is_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_vcpu +#define is_hyp_running_vcpu(ctxt) (ctxt)->__hyp_running_ctxt #define get_hyp_running_ctxt(host_ctxt) (host_ctxt)->__hyp_running_ctxt #define get_hyp_running_hyps(host_ctxt) (host_ctxt)->__hyp_running_hyps diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 9c25078da294..f42aea730cf4 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -116,7 +116,6 @@ int main(void) DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); - DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); DEFINE(HOST_CONTEXT_CTXT, offsetof(struct 
kvm_cpu_context, __hyp_running_ctxt)); DEFINE(HOST_CONTEXT_HYPS, offsetof(struct kvm_cpu_context, __hyp_running_hyps)); DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 22e9f03fe901..cb6a25b79e38 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -293,7 +293,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index) } void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr, - u64 par, uintptr_t vcpu, + u64 par, uintptr_t vcpu_ctxt, u64 far, u64 hpfar) { u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr)); u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr; @@ -333,6 +333,6 @@ void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr, */ kvm_err("Hyp Offset: 0x%llx\n", hyp_offset); - panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n", - spsr, elr, esr, far, hpfar, par, vcpu); + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU_CTXT:%016lx\n", + spsr, elr, esr, far, hpfar, par, vcpu_ctxt); } diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S index 7de2e8716f69..975cf125d54c 100644 --- a/arch/arm64/kvm/hyp/nvhe/host.S +++ b/arch/arm64/kvm/hyp/nvhe/host.S @@ -87,7 +87,7 @@ SYM_FUNC_START(__hyp_do_panic) /* Load the panic arguments into x0-7 */ mrs x0, esr_el2 - get_vcpu_ptr x4, x5 + get_vcpu_ctxt_ptr x4, x5 mrs x5, far_el2 mrs x6, hpfar_el2 mov x7, xzr // Unused argument diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 12c673301210..483df8fe052e 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -272,14 +272,12 @@ void __noreturn hyp_panic(void) u64 elr = read_sysreg_el2(SYS_ELR); u64 par = read_sysreg_par(); struct kvm_cpu_context *host_ctxt; - struct kvm_vcpu *vcpu; struct vcpu_hyp_state *vcpu_hyps; 
host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; - vcpu = get_hyp_running_vcpu(host_ctxt); vcpu_hyps = get_hyp_running_hyps(host_ctxt); - if (vcpu) { + if (vcpu_hyps) { __timer_disable_traps(); __deactivate_traps(vcpu_hyps); __load_host_stage2(); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 14c434e00914..64de9f0d7636 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -203,20 +203,20 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) static void __hyp_call_panic(u64 spsr, u64 elr, u64 par) { struct kvm_cpu_context *host_ctxt; - struct kvm_vcpu *vcpu; + struct kvm_cpu_context *vcpu_ctxt; struct vcpu_hyp_state *vcpu_hyps; host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; - vcpu = get_hyp_running_vcpu(host_ctxt); + vcpu_ctxt = get_hyp_running_ctxt(host_ctxt); vcpu_hyps = get_hyp_running_hyps(host_ctxt); __deactivate_traps(vcpu_hyps); sysreg_restore_host_state_vhe(host_ctxt); - panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n", + panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU_CTXT:%p\n", spsr, elr, read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR), - read_sysreg(hpfar_el2), par, vcpu); + read_sysreg(hpfar_el2), par, vcpu_ctxt); } NOKPROBE_SYMBOL(__hyp_call_panic); -- 2.33.0.685.g46640cef36-goog