On Thu, May 11, 2023, Yang Weijiang wrote:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index a2494156902d..1d0151f9e575 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -711,6 +711,7 @@ static bool is_valid_passthrough_msr(u32 msr)
>  		return true;
>  	case MSR_IA32_U_CET:
>  	case MSR_IA32_PL3_SSP:
> +	case MSR_IA32_S_CET:
>  		return true;
>  	}
> 
> @@ -2097,14 +2098,18 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
>  		break;
>  	case MSR_IA32_U_CET:
> +	case MSR_IA32_S_CET:
>  	case MSR_IA32_PL3_SSP:
>  	case MSR_KVM_GUEST_SSP:
>  		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
>  			return 1;
> -		if (msr_info->index == MSR_KVM_GUEST_SSP)
> +		if (msr_info->index == MSR_KVM_GUEST_SSP) {

Unnecessary curly braces.

>  			msr_info->data = vmcs_readl(GUEST_SSP);
> -		else
> +		} else if (msr_info->index == MSR_IA32_S_CET) {
> +			msr_info->data = vmcs_readl(GUEST_S_CET);
> +		} else {
>  			kvm_get_xsave_msr(msr_info);
> +		}
>  		break;
>  	case MSR_IA32_DEBUGCTLMSR:
>  		msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
> @@ -2419,6 +2424,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		vmx->pt_desc.guest.addr_a[index / 2] = data;
>  		break;
>  	case MSR_IA32_U_CET:
> +	case MSR_IA32_S_CET:
>  	case MSR_IA32_PL3_SSP:
>  	case MSR_KVM_GUEST_SSP:
>  		if (!kvm_cet_is_msr_accessible(vcpu, msr_info))
> @@ -2430,10 +2436,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		if ((msr_index == MSR_IA32_PL3_SSP ||
>  		     msr_index == MSR_KVM_GUEST_SSP) && (data & GENMASK(2, 0)))
>  			return 1;
> -		if (msr_index == MSR_KVM_GUEST_SSP)
> +		if (msr_index == MSR_KVM_GUEST_SSP) {
>  			vmcs_writel(GUEST_SSP, data);
> -		else
> +		} else if (msr_index == MSR_IA32_S_CET) {
> +			vmcs_writel(GUEST_S_CET, data);
> +		} else {

Same here.

>  			kvm_set_xsave_msr(msr_info);
> +		}
>  		break;
>  	case MSR_IA32_PERF_CAPABILITIES:
>  		if (data && !vcpu_to_pmu(vcpu)->version)
> @@ -7322,6 +7331,19 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
> 
>  	kvm_wait_lapic_expire(vcpu);
> 
> +	/*
> +	 * Save host MSR_IA32_S_CET so that it can be reloaded at vm_exit.
> +	 * No need to save the other two vmcs fields as supervisor SHSTK
> +	 * are not enabled on Intel platform now.
> +	 */
> +	if (IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
> +	    (vm_exit_controls_get(vmx) & VM_EXIT_LOAD_CET_STATE)) {
> +		u64 msr;
> +
> +		rdmsrl(MSR_IA32_S_CET, msr);

Reading the MSR on every VM-Enter can't possibly be necessary.  At the absolute
minimum, this could be moved outside of the fastpath; if the kernel modifies
S_CET from NMI context, KVM is hosed.  And *if* S_CET isn't static post-boot,
this can be done in .prepare_switch_to_guest() so long as S_CET isn't modified
from IRQ context.

But unless mine eyes deceive me, S_CET is only truly modified during setup_cet(),
i.e. is static post boot, which means it can be read once at KVM load time, e.g.
just like host_efer.

The kernel does save/restore IBT when making BIOS calls, but if KVM is running a
vCPU across a BIOS call then we've got bigger issues.

> +		vmcs_writel(HOST_S_CET, msr);
> +	}
> +
>  	/* The actual VMENTER/EXIT is in the .noinstr.text section.  */
>  	vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
> 
> @@ -7735,6 +7757,13 @@ static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
> 
>  	incpt |= !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);
>  	vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, incpt);
> +
> +	/*
> +	 * If IBT is available to guest, then passthrough S_CET MSR too since
> +	 * kernel IBT is already in mainline kernel tree.
> +	 */
> +	incpt = !guest_cpuid_has(vcpu, X86_FEATURE_IBT);
> +	vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, incpt);
>  }
> 
>  static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
> @@ -7805,7 +7834,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>  	/* Refresh #PF interception to account for MAXPHYADDR changes. */
>  	vmx_update_exception_bitmap(vcpu);
> 
> -	if (kvm_cet_user_supported())
> +	if (kvm_cet_user_supported() || kvm_cpu_cap_has(X86_FEATURE_IBT))

Yeah, kvm_cet_user_supported() simply looks wrong.
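
For the "snapshot S_CET once at load time" idea, a rough, untested sketch of what
that could look like.  The variable name host_s_cet and the exact hook points are
illustrative only, not something the original patch provides:

	/* Cached host value of MSR_IA32_S_CET, captured once, a la host_efer. */
	static u64 host_s_cet;

	static __init int hardware_setup(void)
	{
		...
		/*
		 * Host S_CET is only written during setup_cet(), i.e. is static
		 * post-boot, so a single snapshot at KVM load time suffices.
		 */
		if (IS_ENABLED(CONFIG_X86_KERNEL_IBT))
			rdmsrl(MSR_IA32_S_CET, host_s_cet);
		...
	}

with the VMCS write done when host state is set up, e.g. somewhere like
vmx_set_constant_host_state() or wherever VM_EXIT_LOAD_CET_STATE ends up being
configured, instead of in the vmx_vcpu_run() fastpath:

	/* Host S_CET doesn't change post-boot, no need to re-read it per VM-Enter. */
	if (IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
	    (vm_exit_controls_get(vmx) & VM_EXIT_LOAD_CET_STATE))
		vmcs_writel(HOST_S_CET, host_s_cet);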