On Thu, Aug 03, 2023 at 12:27:26AM -0400, Yang Weijiang wrote:
>Pass through CET MSRs when the associated feature is enabled. The
>Shadow Stack feature requires all of the CET MSRs in order to be
>architecturally supported in the guest. The IBT feature only depends
>on MSR_IA32_U_CET and MSR_IA32_S_CET to enable both user and
>supervisor IBT. Note, this MSR design introduces an architectural
>limitation of SHSTK and IBT control for the guest, i.e., when SHSTK
>is exposed, IBT is also architecturally available to the guest,
>since IBT relies on a subset of the SHSTK-relevant MSRs.
>
>Signed-off-by: Yang Weijiang <weijiang.yang@xxxxxxxxx>

Reviewed-by: Chao Gao <chao.gao@xxxxxxxxx>

one nit below

>---
> arch/x86/kvm/vmx/vmx.c | 41 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 41 insertions(+)
>
>diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>index ccf750e79608..6779b8a63789 100644
>--- a/arch/x86/kvm/vmx/vmx.c
>+++ b/arch/x86/kvm/vmx/vmx.c
>@@ -709,6 +709,10 @@ static bool is_valid_passthrough_msr(u32 msr)
> 	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
> 		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
> 		return true;
>+	case MSR_IA32_U_CET:
>+	case MSR_IA32_S_CET:
>+	case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
>+		return true;
> 	}
>
> 	r = possible_passthrough_msr_slot(msr) != -ENOENT;
>@@ -7747,6 +7751,41 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
> 		vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
> }
>
>+static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
>+{
>+	bool incpt;
>+
>+	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
>+		incpt = !guest_cpuid_has(vcpu, X86_FEATURE_SHSTK);

...

>+
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB,
>+					  MSR_TYPE_RW, incpt);
>+		if (!incpt)
>+			return;
>+	}
>+
>+	if (kvm_cpu_cap_has(X86_FEATURE_IBT)) {
>+		incpt = !guest_can_use(vcpu, X86_FEATURE_IBT);

can you use guest_can_use() or guest_cpuid_has() consistently?

>+
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET,
>+					  MSR_TYPE_RW, incpt);
>+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET,
>+					  MSR_TYPE_RW, incpt);
>+	}
>+}
>+
> static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
> {
> 	struct vcpu_vmx *vmx = to_vmx(vcpu);
>@@ -7814,6 +7853,8 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>
> 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
> 	vmx_update_exception_bitmap(vcpu);
>+
>+	vmx_update_intercept_for_cet_msr(vcpu);
> }
>
> static u64 vmx_get_perf_capabilities(void)
>--
>2.27.0
>
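
To illustrate the nit, here is a minimal, untested sketch that uses guest_can_use() in both branches, assuming guest_can_use() can also be queried for X86_FEATURE_SHSTK; using guest_cpuid_has() in both places would be equally fine, the point is only to use the same helper consistently. Apart from the SHSTK check, this is just the posted patch:

static void vmx_update_intercept_for_cet_msr(struct kvm_vcpu *vcpu)
{
	bool incpt;

	if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
		/* Same helper as the IBT branch below. */
		incpt = !guest_can_use(vcpu, X86_FEATURE_SHSTK);

		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_INT_SSP_TAB, MSR_TYPE_RW, incpt);

		/* U_CET/S_CET were already handled above, so IBT is covered too. */
		if (!incpt)
			return;
	}

	if (kvm_cpu_cap_has(X86_FEATURE_IBT)) {
		incpt = !guest_can_use(vcpu, X86_FEATURE_IBT);

		vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, incpt);
		vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, incpt);
	}
}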