From: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>

Load/Store Intel Processor Trace registers on context switch. MSR
IA32_RTIT_CTL is loaded/stored automatically from the VMCS; the other
trace MSRs are loaded/stored manually.

Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
 arch/x86/kvm/vmx.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7eaf774..091120e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2131,6 +2131,61 @@ static unsigned long segment_base(u16 selector)
 }
 #endif
 
+static inline void pt_load_msr(struct pt_ctx *ctx, unsigned int addr_num)
+{
+	u32 i;
+
+	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+	for (i = 0; i < addr_num; i++)
+		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addrs[i]);
+}
+
+static inline void pt_save_msr(struct pt_ctx *ctx, unsigned int addr_num)
+{
+	u32 i;
+
+	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+	for (i = 0; i < addr_num; i++)
+		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i, ctx->addrs[i]);
+}
+
+static void pt_guest_enter(struct vcpu_vmx *vmx)
+{
+	u64 ctl;
+
+	if (pt_mode == PT_MODE_HOST || pt_mode == PT_MODE_HOST_GUEST) {
+		rdmsrl(MSR_IA32_RTIT_CTL, ctl);
+		vmx->pt_desc.host.ctl = ctl;
+		if (ctl & RTIT_CTL_TRACEEN) {
+			ctl &= ~RTIT_CTL_TRACEEN;
+			wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+		}
+	}
+
+	if (pt_mode == PT_MODE_HOST_GUEST) {
+		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_num);
+		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_num);
+	}
+}
+
+static void pt_guest_exit(struct vcpu_vmx *vmx)
+{
+	if (pt_mode == PT_MODE_HOST_GUEST) {
+		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_num);
+		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_num);
+		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+	}
+
+	if (pt_mode == PT_MODE_HOST)
+		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+}
+
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5618,6 +5673,13 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 	}
 
+	if (pt_mode == PT_MODE_HOST_GUEST) {
+		u32 eax, ebx, ecx, edx;
+
+		cpuid_count(0x14, 1, &eax, &ebx, &ecx, &edx);
+		vmx->pt_desc.addr_num = eax & 0x7;
+	}
+
 	return 0;
 }
 
@@ -5712,6 +5774,15 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	update_exception_bitmap(vcpu);
 
 	vpid_sync_context(vmx->vpid);
+
+	if (pt_mode == PT_MODE_HOST_GUEST) {
+		memset(&vmx->pt_desc.host, 0, sizeof(vmx->pt_desc.host));
+		memset(&vmx->pt_desc.guest, 0, sizeof(vmx->pt_desc.guest));
+		/* Bits[6:0] are forced to 1, writes are ignored. */
+		vmx->pt_desc.guest.output_mask = 0x7F;
+		/* Clear IA32_RTIT_CTL in VMCS guest state. */
+		vmcs_write32(GUEST_IA32_RTIT_CTL, 0);
+	}
 }
 
 /*
@@ -9382,6 +9453,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	    vcpu->arch.pkru != vmx->host_pkru)
 		__write_pkru(vcpu->arch.pkru);
 
+	pt_guest_enter(vmx);
+
 	atomic_switch_perf_msrs(vmx);
 	debugctlmsr = get_debugctlmsr();
 
@@ -9517,6 +9590,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
+	pt_guest_exit(vmx);
+
 	/*
 	 * eager fpu is enabled if PKEY is supported and CR4 is switched
 	 * back on host, so it is safe to read guest PKRU from current
--
1.8.3.1
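
For reference, the helpers above assume a per-vCPU PT context roughly like
the sketch below. The actual struct pt_ctx/pt_desc definitions are introduced
earlier in this series; the field names here are inferred from the accesses in
this patch, and the addrs[] sizing is an assumption.

/*
 * Sketch only: Intel PT register state saved/restored manually by
 * pt_save_msr()/pt_load_msr(); one copy each for host and guest.
 */
struct pt_ctx {
	u64 ctl;		/* IA32_RTIT_CTL */
	u64 status;		/* IA32_RTIT_STATUS */
	u64 output_base;	/* IA32_RTIT_OUTPUT_BASE */
	u64 output_mask;	/* IA32_RTIT_OUTPUT_MASK */
	u64 cr3_match;		/* IA32_RTIT_CR3_MATCH */
	u64 addrs[8];		/* written as MSR_IA32_RTIT_ADDR0_A + i; size is an assumption */
};

/* Sketch only: embedded in struct vcpu_vmx as vmx->pt_desc. */
struct pt_desc {
	unsigned int addr_num;	/* from CPUID.(EAX=14H, ECX=1):EAX[2:0] */
	struct pt_ctx host;
	struct pt_ctx guest;
};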