If running an NV guest on an ARMv8.4-NV capable system, let's allocate
an additional page that will be used by the hypervisor to fulfill system
register accesses.

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  3 ++-
 arch/arm64/kvm/nested.c           | 10 ++++++++++
 arch/arm64/kvm/reset.c            |  1 +
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 15bbe8ccdefa..5d69992106aa 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -568,7 +568,8 @@ struct kvm_vcpu_arch {
  */
 static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
-	if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
+	if (unlikely(cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
+		     r >= __VNCR_START__ && ctxt->vncr_array))
 		return &ctxt->vncr_array[r - __VNCR_START__];
 
 	return (u64 *)&ctxt->sys_regs[r];
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 8e99690cdde1..bbdd4633e665 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -35,6 +35,14 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
 		return -EINVAL;
 
+	if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
+		if (!vcpu->arch.ctxt.vncr_array)
+			vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+		if (!vcpu->arch.ctxt.vncr_array)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&kvm->lock);
 
 	/*
@@ -64,6 +72,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
 		kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
 		kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+		free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+		vcpu->arch.ctxt.vncr_array = NULL;
 	} else {
 		kvm->arch.nested_mmus_size = num_mmus;
 		ret = 0;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d19a9aad2d85..f59c00fb53cc 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -161,6 +161,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (sve_state)
 		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
 	kfree(sve_state);
+	free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
-- 
2.30.2
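
For context, a minimal user-space sketch of the dispatch pattern that
__ctxt_sys_reg() implements above: indices at or above __VNCR_START__
resolve into the separately allocated VNCR page when one exists, and fall
back to the in-context sys_regs[] array otherwise. The register counts and
names below are illustrative stand-ins, not the kernel's definitions.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative sizes; the kernel derives these from its sysreg enum. */
	enum { VNCR_START = 4, NR_VNCR_REGS = 2, NR_SYS_REGS = 6 };

	struct cpu_context {
		uint64_t sys_regs[NR_SYS_REGS];
		uint64_t *vncr_array;	/* one zeroed page in the kernel; may be NULL */
	};

	static uint64_t *ctxt_sys_reg(struct cpu_context *ctxt, int r)
	{
		/* Redirect VNCR-backed registers into the dedicated page. */
		if (r >= VNCR_START && ctxt->vncr_array)
			return &ctxt->vncr_array[r - VNCR_START];

		/* Otherwise (or with no VNCR page) use the in-context array. */
		return &ctxt->sys_regs[r];
	}

	int main(void)
	{
		struct cpu_context ctxt = { .vncr_array = NULL };

		/* Stands in for __get_free_page(GFP_KERNEL | __GFP_ZERO). */
		ctxt.vncr_array = calloc(NR_VNCR_REGS, sizeof(uint64_t));
		if (!ctxt.vncr_array)
			return 1;	/* the patch returns -ENOMEM here */

		*ctxt_sys_reg(&ctxt, VNCR_START) = 0xcafe;

		/* The write landed in the VNCR page, not in sys_regs[]. */
		printf("vncr[0] = %" PRIx64 "\n", ctxt.vncr_array[0]);

		free(ctxt.vncr_array);	/* free_page() in kvm_arm_vcpu_destroy() */
		return 0;
	}

Because every caller goes through the accessor, the hypervisor can hand the
VNCR page to ARMv8.4-NV hardware while the rest of KVM keeps reading and
writing "registers" without knowing where they live.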