From: Ilias Stamatis <ilstam@xxxxxxxxxx>

A subsequent patch fixes write_l1_tsc_offset() to account for nested TSC
scaling. Calculating the L1 TSC offset for logging it with the trace call
then becomes more complex. Move the trace call to the common caller,
kvm_vcpu_write_tsc_offset(), instead; this also removes the trace call
duplicated between the SVM and VMX implementations.

Signed-off-by: Ilias Stamatis <ilstam@xxxxxxxxxx>
---
 arch/x86/kvm/svm/svm.c | 4 ----
 arch/x86/kvm/vmx/vmx.c | 3 ---
 arch/x86/kvm/x86.c     | 4 ++++
 3 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9790c73f2a32..d2f9d6a9716f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1090,10 +1090,6 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		svm->vmcb01.ptr->control.tsc_offset = offset;
 	}
 
-	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-				   svm->vmcb->control.tsc_offset - g_tsc_offset,
-				   offset);
-
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index cbe0cdade38a..49241423b854 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1812,9 +1812,6 @@ static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
 		g_tsc_offset = vmcs12->tsc_offset;
 
-	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-				   vcpu->arch.tsc_offset - g_tsc_offset,
-				   offset);
 	vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
 	return offset + g_tsc_offset;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 87deb119c521..c08295bcf50e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2299,6 +2299,10 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
+	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+				   vcpu->arch.l1_tsc_offset,
+				   offset);
+
 	vcpu->arch.l1_tsc_offset = offset;
 	vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
 }
-- 
2.17.1
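
For reviewers' reference (not part of the applied diff): with the hunks
above in place, kvm_vcpu_write_tsc_offset() in arch/x86/kvm/x86.c should
read roughly as follows. This is a sketch reconstructed solely from this
patch's context and added lines, assuming no other changes touch the
function:

	static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
	{
		/*
		 * Trace the old and new L1 offsets once, in common code,
		 * for both SVM and VMX. vcpu->arch.l1_tsc_offset still
		 * holds the previous value at this point.
		 */
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vcpu->arch.l1_tsc_offset,
					   offset);

		vcpu->arch.l1_tsc_offset = offset;
		vcpu->arch.tsc_offset =
			static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
	}

Since the previous L1 offset is already cached in
vcpu->arch.l1_tsc_offset, common code does not need to re-derive it from
the vendor-specific g_tsc_offset the way the removed SVM/VMX trace calls
did.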