From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Implement the set_msr/get_msr/has_emulated_msr methods for TDX to handle
the hypercalls from the guest TD for paravirtualized rdmsr and wrmsr.

The TDX module virtualizes MSRs.  For some MSRs, it injects a #VE into the
guest TD upon RDMSR or WRMSR.  The exact list of such MSRs is defined in
the spec.  Upon #VE, the guest TD may execute the hypercalls
TDG.VP.VMCALL<INSTRUCTION.RDMSR> and TDG.VP.VMCALL<INSTRUCTION.WRMSR>,
which are defined in the GHCI (Guest-Host Communication Interface), so
that the host VMM (e.g. KVM) can virtualize the MSRs.

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Reviewed-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 arch/x86/kvm/vmx/main.c    | 34 +++++++++++++++++--
 arch/x86/kvm/vmx/tdx.c     | 68 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/x86_ops.h |  6 ++++
 3 files changed, 105 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 95b7a90aa0d7..dec9689afab2 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -205,6 +205,34 @@ static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 	vmx_handle_exit_irqoff(vcpu);
 }
 
+static int vt_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	if (unlikely(is_td_vcpu(vcpu)))
+		return tdx_set_msr(vcpu, msr_info);
+
+	return vmx_set_msr(vcpu, msr_info);
+}
+
+/*
+ * The kvm parameter can be NULL (module initialization, or invocation before
+ * VM creation). Be sure to check the kvm parameter before using it.
+ */
+static bool vt_has_emulated_msr(struct kvm *kvm, u32 index)
+{
+	if (kvm && is_td(kvm))
+		return tdx_is_emulated_msr(index, true);
+
+	return vmx_has_emulated_msr(kvm, index);
+}
+
+static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	if (unlikely(is_td_vcpu(vcpu)))
+		return tdx_get_msr(vcpu, msr_info);
+
+	return vmx_get_msr(vcpu, msr_info);
+}
+
 static void vt_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 {
 	struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -424,7 +452,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 
 	.hardware_enable = vt_hardware_enable,
 	.hardware_disable = vt_hardware_disable,
-	.has_emulated_msr = vmx_has_emulated_msr,
+	.has_emulated_msr = vt_has_emulated_msr,
 
 	.is_vm_type_supported = vt_is_vm_type_supported,
 	.vm_size = sizeof(struct kvm_vmx),
@@ -444,8 +472,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 
 	.update_exception_bitmap = vmx_update_exception_bitmap,
 	.get_msr_feature = vmx_get_msr_feature,
-	.get_msr = vmx_get_msr,
-	.set_msr = vmx_set_msr,
+	.get_msr = vt_get_msr,
+	.set_msr = vt_set_msr,
 	.get_segment_base = vmx_get_segment_base,
 	.get_segment = vmx_get_segment,
 	.set_segment = vmx_set_segment,
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 6ab4a52fc9e9..f46825843a8b 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1627,6 +1627,74 @@ void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
 	*error_code = 0;
 }
 
+bool tdx_is_emulated_msr(u32 index, bool write)
+{
+	switch (index) {
+	case MSR_IA32_UCODE_REV:
+	case MSR_IA32_ARCH_CAPABILITIES:
+	case MSR_IA32_POWER_CTL:
+	case MSR_MTRRcap:
+	case 0x200 ... 0x26f:
+		/* IA32_MTRR_PHYS{BASE, MASK}, IA32_MTRR_FIX*_* */
+	case MSR_IA32_CR_PAT:
+	case MSR_MTRRdefType:
+	case MSR_IA32_TSC_DEADLINE:
+	case MSR_IA32_MISC_ENABLE:
+	case MSR_KVM_STEAL_TIME:
+	case MSR_KVM_POLL_CONTROL:
+	case MSR_PLATFORM_INFO:
+	case MSR_MISC_FEATURES_ENABLES:
+	case MSR_IA32_MCG_CAP:
+	case MSR_IA32_MCG_STATUS:
+	case MSR_IA32_MCG_CTL:
+	case MSR_IA32_MCG_EXT_CTL:
+	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_MISC(28) - 1:
+		/* MSR_IA32_MCx_{CTL, STATUS, ADDR, MISC} */
+		return true;
+	case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
+		/*
+		 * x2APIC registers that are virtualized by the CPU can't be
+		 * emulated, KVM doesn't have access to the virtual APIC page.
+		 */
+		switch (index) {
+		case X2APIC_MSR(APIC_TASKPRI):
+		case X2APIC_MSR(APIC_PROCPRI):
+		case X2APIC_MSR(APIC_EOI):
+		case X2APIC_MSR(APIC_ISR) ... X2APIC_MSR(APIC_ISR + APIC_ISR_NR):
+		case X2APIC_MSR(APIC_TMR) ... X2APIC_MSR(APIC_TMR + APIC_ISR_NR):
+		case X2APIC_MSR(APIC_IRR) ... X2APIC_MSR(APIC_IRR + APIC_ISR_NR):
+			return false;
+		default:
+			return true;
+		}
+	case MSR_IA32_APICBASE:
+	case MSR_EFER:
+		return !write;
+	case MSR_IA32_MCx_CTL2(0) ... MSR_IA32_MCx_CTL2(31):
+		/*
+		 * 0x280 - 0x29f: The x86 common code doesn't emulate MCx_CTL2.
+		 * Refer to kvm_{get,set}_msr_common(),
+		 * kvm_mtrr_{get, set}_msr(), and msr_mtrr_valid().
+		 */
+	default:
+		return false;
+	}
+}
+
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	if (tdx_is_emulated_msr(msr->index, false))
+		return kvm_get_msr_common(vcpu, msr);
+	return 1;
+}
+
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+{
+	if (tdx_is_emulated_msr(msr->index, true))
+		return kvm_set_msr_common(vcpu, msr);
+	return 1;
+}
+
 int tdx_dev_ioctl(void __user *argp)
 {
 	struct kvm_tdx_capabilities __user *user_caps;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 53cf6d5a72a1..64e7da448906 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -156,6 +156,9 @@ void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 void tdx_inject_nmi(struct kvm_vcpu *vcpu);
 void tdx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1, u64 *info2,
 		       u32 *intr_info, u32 *error_code);
+bool tdx_is_emulated_msr(u32 index, bool write);
+int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
 int tdx_vm_ioctl(struct kvm *kvm, void __user *argp);
 int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
@@ -193,6 +196,9 @@ static inline void tdx_inject_nmi(struct kvm_vcpu *vcpu) {}
 static inline void tdx_get_exit_info(
 	struct kvm_vcpu *vcpu, u32 *reason, u64 *info1, u64 *info2,
 	u32 *intr_info, u32 *error_code) {}
+static inline bool tdx_is_emulated_msr(u32 index, bool write) { return false; }
+static inline int tdx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
+static inline int tdx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return 1; }
 
 static inline int tdx_vm_ioctl(struct kvm *kvm, void __user *argp) { return -EOPNOTSUPP; }
 static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }
--
2.25.1
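
The TDVMCALL exit handling that routes TDG.VP.VMCALL<INSTRUCTION.RDMSR> and
TDG.VP.VMCALL<INSTRUCTION.WRMSR> into these new hooks is added by a separate
patch in the series.  For illustration only, below is a minimal sketch of what
an RDMSR hypercall handler could look like, assuming the GHCI register
convention (MSR index in R12, TDVMCALL status returned in R10, value read
returned in R11) and assuming the guest's TDVMCALL registers are reflected in
the vcpu GPR cache at exit time.  The function name and the TDVMCALL_* status
macros are placeholders; kvm_r*_read()/kvm_r*_write() and kvm_get_msr() are
existing KVM helpers.

/*
 * Illustrative sketch, not part of this patch: a hypothetical handler for
 * TDG.VP.VMCALL<INSTRUCTION.RDMSR>.  TDVMCALL_SUCCESS and
 * TDVMCALL_INVALID_OPERAND stand in for the GHCI-defined status codes.
 */
static int tdx_handle_rdmsr_sketch(struct kvm_vcpu *vcpu)
{
	u32 index = kvm_r12_read(vcpu);		/* MSR index from the guest TD */
	u64 data;

	/*
	 * kvm_get_msr() dispatches through vt_get_msr() -> tdx_get_msr(), so
	 * only MSRs accepted by tdx_is_emulated_msr() are actually emulated.
	 */
	if (kvm_get_msr(vcpu, index, &data)) {
		kvm_r10_write(vcpu, TDVMCALL_INVALID_OPERAND);
		return 1;			/* resume the guest; hypercall failed */
	}

	kvm_r11_write(vcpu, data);		/* read value back to the guest */
	kvm_r10_write(vcpu, TDVMCALL_SUCCESS);
	return 1;				/* resume the guest */
}

The WRMSR side would be symmetric: take the value to write from R13, call
kvm_set_msr() (which reaches tdx_set_msr() the same way), and return only the
status in R10.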