From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Morph the PV RDMSR/WRMSR hypercalls to EXIT_REASON_MSR_{READ,WRITE} and
wire up the KVM backend functions.

For the complete_emulated_msr() callback, instead of injecting a #GP on
error, implement tdx_complete_emulated_msr() to set the hypercall
return code on error.  On an MSR read, also set the hypercall return
value from the KVM x86 registers.

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
TDX "the rest" v2:
- Morph PV RDMSR/WRMSR hypercall to EXIT_REASON_MSR_{READ,WRITE}. (Sean)
- Rebased to use tdcall_to_vmx_exit_reason().
- Marshal values to the appropriate x86 registers to leverage the
  existing kvm_emulate_{rdmsr,wrmsr}(). (Sean)
- Implement the complete_emulated_msr() callback for TDX to set the
  return value and return code in vp_enter_args.
- Update changelog.

TDX "the rest" v1:
- Use the TDVMCALL_STATUS prefix for TDX call status codes. (Binbin)
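
Note: the hypercall being morphed follows the GHCI register convention
that KVM unpacks from vp_enter_args: on input, r11 carries the TDVMCALL
leaf (a reused VMX exit reason), r12 the MSR index and r13 the value to
write; on output, r10 carries the TDVMCALL status and r11 the value
read.  For illustration only (not part of this patch), the guest-side
counterpart of the RDMSR path looks roughly like the read_msr() #VE
handler in arch/x86/coco/tdx/tdx.c -- details vary by kernel version:

  static int read_msr(struct pt_regs *regs, struct ve_info *ve)
  {
          struct tdx_module_args args = {
                  .r10 = TDX_HYPERCALL_STANDARD,
                  .r11 = hcall_func(EXIT_REASON_MSR_READ), /* TDVMCALL leaf */
                  .r12 = regs->cx,                         /* MSR index */
          };

          /*
           * KVM morphs this to EXIT_REASON_MSR_READ and routes it
           * through kvm_emulate_rdmsr(); on success,
           * tdx_complete_emulated_msr() copies EDX:EAX back into r11.
           */
          if (__tdx_hypercall(&args))
                  return -EIO;

          regs->ax = lower_32_bits(args.r11);
          regs->dx = upper_32_bits(args.r11);
          return ve_instr_len(ve);
  }
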
---
 arch/x86/kvm/vmx/main.c | 10 +++++++++-
 arch/x86/kvm/vmx/tdx.c  | 24 ++++++++++++++++++++++++
 arch/x86/kvm/vmx/tdx.h  |  2 ++
 3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 463de3add3bd..7f3be1b65ce1 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -235,6 +235,14 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
 		vmx_msr_filter_changed(vcpu);
 }
 
+static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+	if (is_td_vcpu(vcpu))
+		return tdx_complete_emulated_msr(vcpu, err);
+
+	return kvm_complete_insn_gp(vcpu, err);
+}
+
 #ifdef CONFIG_KVM_SMM
 static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
@@ -686,7 +694,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.migrate_timers = vmx_migrate_timers,
 
 	.msr_filter_changed = vt_msr_filter_changed,
-	.complete_emulated_msr = kvm_complete_insn_gp,
+	.complete_emulated_msr = vt_complete_emulated_msr,
 
 	.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 5b556af0f139..85ff6e040cf3 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -878,6 +878,8 @@ static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_CPUID:
 	case EXIT_REASON_HLT:
 	case EXIT_REASON_IO_INSTRUCTION:
+	case EXIT_REASON_MSR_READ:
+	case EXIT_REASON_MSR_WRITE:
 		return tdvmcall_leaf(vcpu);
 	case EXIT_REASON_EPT_VIOLATION:
 		return EXIT_REASON_EPT_MISCONFIG;
@@ -1880,6 +1882,20 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+	if (err) {
+		tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+		return 1;
+	}
+
+	if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MSR_READ)
+		tdvmcall_set_return_val(vcpu, kvm_read_edx_eax(vcpu));
+
+	return 1;
+}
+
+
 int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 {
 	struct vcpu_tdx *tdx = to_tdx(vcpu);
@@ -1945,6 +1961,14 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 		return tdx_emulate_vmcall(vcpu);
 	case EXIT_REASON_IO_INSTRUCTION:
 		return tdx_emulate_io(vcpu);
+	case EXIT_REASON_MSR_READ:
+		kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+		return kvm_emulate_rdmsr(vcpu);
+	case EXIT_REASON_MSR_WRITE:
+		kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+		kvm_rax_write(vcpu, tdx->vp_enter_args.r13 & -1u);
+		kvm_rdx_write(vcpu, tdx->vp_enter_args.r13 >> 32);
+		return kvm_emulate_wrmsr(vcpu);
 	case EXIT_REASON_EPT_MISCONFIG:
 		return tdx_emulate_mmio(vcpu);
 	case EXIT_REASON_EPT_VIOLATION:
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index cb9014b7a4f1..8f8070d0f55e 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -171,6 +171,7 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
 
 
 bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);
 
 TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
 TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
@@ -194,6 +195,7 @@ struct vcpu_tdx {
 };
 
 static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }
 
 #endif
 
-- 
2.46.0