On Tue, May 22, 2018 at 10:51:47AM +0800, Jingqi Liu wrote:
> A new control bit (bit 29) in the TEST_CTL MSR will be introduced
> to enable detection of split locks.
>
> When bit 29 of the TEST_CTL (33H) MSR is set, the processor causes
> an #AC exception to be issued instead of suppressing LOCK on the
> bus during a split lock access. An existing control bit (bit 31)
> in this MSR, when set, causes the processor to disable LOCK#
> assertion for split locked accesses. When bits 29 and 31 are both
> set, bit 29 takes precedence.

Migration? (more on this below)

> The reference document is at the link below:
> https://software.intel.com/sites/default/files/managed/c5/15/\
> architecture-instruction-set-extensions-programming-reference.pdf
>
> This patch depends on https://lkml.org/lkml/2018/5/14/1157
>
> Signed-off-by: Jingqi Liu <jingqi.liu@xxxxxxxxx>
> ---
>  arch/x86/kvm/vmx.c | 13 +++++++++++++
>  1 file changed, 13 insertions(+)
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 3f16965..07986e0 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -610,6 +610,8 @@ struct vcpu_vmx {
>
>  	u64 arch_capabilities;
>  	u64 spec_ctrl;
> +	u64 guest_split_lock_ctrl;
> +	u64 host_split_lock_ctrl;
>
>  	u32 vm_entry_controls_shadow;
>  	u32 vm_exit_controls_shadow;
> @@ -6013,6 +6015,8 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
>  	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
>
> +	vmx->guest_split_lock_ctrl = 0;
> +
>  	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
>  		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
>
> @@ -6062,6 +6066,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>
>  	vmx->rmode.vm86_active = 0;
>  	vmx->spec_ctrl = 0;
> +	vmx->guest_split_lock_ctrl = 0;
>
>  	vcpu->arch.microcode_version = 0x100000000ULL;
>  	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
> @@ -9725,6 +9730,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vmx->spec_ctrl)
>  		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
>
> +	vmx->host_split_lock_ctrl = native_read_msr(MSR_TEST_CTL);
> +	native_wrmsrl(MSR_TEST_CTL, vmx->guest_split_lock_ctrl);
> +
>  	vmx->__launched = vmx->loaded_vmcs->launched;
>
>  	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
> @@ -9874,6 +9882,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vmx->spec_ctrl)
>  		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
>
> +	vmx->guest_split_lock_ctrl = native_read_msr(MSR_TEST_CTL);
> +	native_wrmsrl(MSR_TEST_CTL, vmx->host_split_lock_ctrl);
> +
>  	/* Eliminate branch target predictions from guest mode */
>  	vmexit_fill_RSB();
>
> @@ -10037,6 +10048,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>  	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
>  	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
>  	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
> +	vmx_disable_intercept_for_msr(msr_bitmap, MSR_TEST_CTL, MSR_TYPE_RW);
> +
>  	vmx->msr_bitmap_mode = 0;
>
>  	vmx->loaded_vmcs = &vmx->vmcs01;
> --
> 1.8.3.1
>
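On the migration question above: the guest's TEST_CTL value also needs to
be readable and writable from userspace, otherwise it is lost across
save/restore. As far as I can tell this patch only disables the MSR
intercept and swaps the value around vmentry/vmexit, so KVM_GET_MSRS /
KVM_SET_MSRS on MSR_TEST_CTL from userspace would still fail. A minimal,
untested sketch of the missing plumbing (assuming MSR_TEST_CTL is defined
by the dependency patch; the guard for hosts without the feature is
omitted here):

	/* vmx.c: expose the saved guest value through the common MSR
	 * path, so userspace can read it with KVM_GET_MSRS ...
	 */
	static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		switch (msr_info->index) {
		...
		case MSR_TEST_CTL:
			msr_info->data = vmx->guest_split_lock_ctrl;
			break;
		...
	}

	/* ... and restore it with KVM_SET_MSRS on the destination. */
	static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);

		switch (msr_info->index) {
		...
		case MSR_TEST_CTL:
			vmx->guest_split_lock_ctrl = msr_info->data;
			break;
		...
	}

MSR_TEST_CTL would also have to be added to msrs_to_save[] in
arch/x86/kvm/x86.c, so that it is reported by KVM_GET_MSR_INDEX_LIST and
userspace knows it has to be transferred.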