On 10/17/2012 04:10 AM, Will Auld wrote:
> Signed-off-by: Will Auld <will.auld@xxxxxxxxx>
> ---
>
> Resending to full list
>
> Marcelo,
>
> This patch is what I believe you asked for as foundational for later
> patches to address IA32_TSC_ADJUST.
>

Please write a changelog to reflect the motivation.

All those bool parameters scattered all over the place aren't very
pretty.  Usually we solve this with helpers that embed the parameter
name (kvm_set_msr() vs. kvm_set_msr_host()), but there are too many
functions for that to work here.

Marcelo, any ideas?  (I'll sketch what I mean below the patch.)

> Thanks,
>
> Will
>
>  arch/x86/include/asm/kvm_host.h |  8 ++++----
>  arch/x86/kvm/svm.c              | 18 ++++++++++--------
>  arch/x86/kvm/vmx.c              | 18 ++++++++++--------
>  arch/x86/kvm/x86.c              | 18 ++++++++++--------
>  arch/x86/kvm/x86.h              |  2 +-
>  5 files changed, 35 insertions(+), 29 deletions(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..c06f0d1 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -621,7 +621,7 @@ struct kvm_x86_ops {
> 	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
> 				struct kvm_guest_debug *dbg);
> 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
> -	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated);
> 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
> 	void (*get_segment)(struct kvm_vcpu *vcpu,
> 			    struct kvm_segment *var, int seg);
> @@ -684,7 +684,7 @@ struct kvm_x86_ops {
> 	bool (*has_wbinvd_exit)(void);
>
> 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
> -	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
> +	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset, bool guest_initiated);
>
> 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
> 	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu);
> @@ -772,7 +772,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
>
> void kvm_enable_efer_bits(u64);
> int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
> +int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated);
>
> struct x86_emulate_ctxt;
>
> @@ -799,7 +799,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
> int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
>
> int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool guest_initiated);
>
> unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
> void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index baead95..424be27 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -1012,7 +1012,8 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
> 	svm->tsc_ratio = ratio;
> }
>
> -static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> +static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset,
> +				 bool guest_initiated)
> {
> 	struct vcpu_svm *svm = to_svm(vcpu);
> 	u64 g_tsc_offset = 0;
> @@ -1255,7 +1256,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
> 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
> 	svm->asid_generation = 0;
> 	init_vmcb(svm);
> -	kvm_write_tsc(&svm->vcpu, 0);
> +	kvm_write_tsc(&svm->vcpu, 0, false /*Not Guest Initiated*/);
>
> 	err = fx_init(&svm->vcpu);
> 	if (err)
> @@ -3147,13 +3148,14 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
> 	return 0;
> }
>
> -static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
> +static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data,
> +		       bool guest_initiated)
> {
> 	struct vcpu_svm *svm = to_svm(vcpu);
>
> 	switch (ecx) {
> 	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, data, guest_initiated);
> 		break;
> 	case MSR_STAR:
> 		svm->vmcb->save.star = data;
> @@ -3208,12 +3210,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
> 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
> 		break;
> 	default:
> -		return kvm_set_msr_common(vcpu, ecx, data);
> +		return kvm_set_msr_common(vcpu, ecx, data, guest_initiated);
> 	}
> 	return 0;
> }
>
> -static int wrmsr_interception(struct vcpu_svm *svm)
> +static int wrmsr_interception(struct vcpu_svm *svm, bool guest_initiated)
> {
> 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
> 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
> @@ -3221,7 +3223,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
>
>
> 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
> -	if (svm_set_msr(&svm->vcpu, ecx, data)) {
> +	if (svm_set_msr(&svm->vcpu, ecx, data, guest_initiated)) {
> 		trace_kvm_msr_write_ex(ecx, data);
> 		kvm_inject_gp(&svm->vcpu, 0);
> 	} else {
> @@ -3234,7 +3236,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
> static int msr_interception(struct vcpu_svm *svm)
> {
> 	if (svm->vmcb->control.exit_info_1)
> -		return wrmsr_interception(svm);
> +		return wrmsr_interception(svm, true /*Guest Initiated*/);
> 	else
> 		return rdmsr_interception(svm);
> }
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index c00f03d..85a9603 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -1864,7 +1864,8 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
> /*
>  * writes 'offset' into guest's timestamp counter offset register
>  */
> -static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
> +static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset,
> +				 bool guest_initiated)
> {
> 	if (is_guest_mode(vcpu)) {
> 		/*
> @@ -2197,7 +2198,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
>  * Returns 0 on success, non-0 otherwise.
>  * Assumes vcpu_load() was already called.
>  */
> -static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data,
> +		       bool guest_initiated)
> {
> 	struct vcpu_vmx *vmx = to_vmx(vcpu);
> 	struct shared_msr_entry *msr;
> @@ -2205,7 +2207,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>
> 	switch (msr_index) {
> 	case MSR_EFER:
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
> 		break;
> #ifdef CONFIG_X86_64
> 	case MSR_FS_BASE:
> @@ -2231,7 +2233,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> 		vmcs_writel(GUEST_SYSENTER_ESP, data);
> 		break;
> 	case MSR_IA32_TSC:
> -		kvm_write_tsc(vcpu, data);
> +		kvm_write_tsc(vcpu, data, guest_initiated);
> 		break;
> 	case MSR_IA32_CR_PAT:
> 		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
> @@ -2239,7 +2241,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> 			vcpu->arch.pat = data;
> 			break;
> 		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
> 		break;
> 	case MSR_TSC_AUX:
> 		if (!vmx->rdtscp_enabled)
> @@ -2262,7 +2264,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> 			}
> 			break;
> 		}
> -		ret = kvm_set_msr_common(vcpu, msr_index, data);
> +		ret = kvm_set_msr_common(vcpu, msr_index, data, guest_initiated);
> 	}
>
> 	return ret;
> @@ -3918,7 +3920,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
> 	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
> 	set_cr4_guest_host_mask(vmx);
>
> -	kvm_write_tsc(&vmx->vcpu, 0);
> +	kvm_write_tsc(&vmx->vcpu, 0, false /*Not Guest Initiated*/);
>
> 	return 0;
> }
> @@ -4653,7 +4655,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
> 	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
> 		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
>
> -	if (vmx_set_msr(vcpu, ecx, data) != 0) {
> +	if (vmx_set_msr(vcpu, ecx, data, true /*Guest Initiated*/) != 0) {
> 		trace_kvm_msr_write_ex(ecx, data);
> 		kvm_inject_gp(vcpu, 0);
> 		return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 42bce48..9b1263d 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -883,9 +883,9 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
>  * Returns 0 on success, non-0 otherwise.
>  * Assumes vcpu_load() was already called.
>  */
> -int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
> +int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data, bool guest_initiated)
> {
> -	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
> +	return kvm_x86_ops->set_msr(vcpu, msr_index, data, guest_initiated);
> }
>
> /*
> @@ -893,7 +893,7 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
>  */
> static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
> {
> -	return kvm_set_msr(vcpu, index, *data);
> +	return kvm_set_msr(vcpu, index, *data, false /*Not Guest Initiated*/);
> }
>
> static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
> @@ -1043,7 +1043,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
> 	return tsc;
> }
>
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data, bool guest_initiated)
> {
> 	struct kvm *kvm = vcpu->kvm;
> 	u64 offset, ns, elapsed;
> @@ -1126,7 +1126,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
> 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
> 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
>
> -	kvm_x86_ops->write_tsc_offset(vcpu, offset);
> +	kvm_x86_ops->write_tsc_offset(vcpu, offset, guest_initiated);
> 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
> }
>
> @@ -1561,7 +1561,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
> 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
> }
>
> -int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
> +int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data,
> +		       bool guest_initiated)
> {
> 	bool pr = false;
>
> @@ -2324,7 +2325,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> 		if (check_tsc_unstable()) {
> 			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
> 						vcpu->arch.last_guest_tsc);
> -			kvm_x86_ops->write_tsc_offset(vcpu, offset);
> +			kvm_x86_ops->write_tsc_offset(vcpu, offset,
> +					false /*Not Guest Initiated*/);
> 			vcpu->arch.tsc_catchup = 1;
> 		}
> 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
> @@ -4286,7 +4288,7 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
> static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
> 			    u32 msr_index, u64 data)
> {
> -	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
> +	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data, false /*Not Guest Initiated*/);
> }
>
> static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index 3d1134d..241f62c 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -112,7 +112,7 @@ void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
> void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
> int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
>
> -void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
> +void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data, bool guest_initiated);
>
> int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
> 			gva_t addr, void *val, unsigned int bytes,

-- 
error compiling committee.c: too many arguments to function
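
P.S. To make the helper idea above concrete, a minimal sketch against
the kvm_set_msr() signature from this patch; kvm_set_msr_guest() is a
name I'm making up for illustration, not an existing function:

static inline int kvm_set_msr_host(struct kvm_vcpu *vcpu, u32 msr_index,
				   u64 data)
{
	/* host-side writes (ioctls, emulator) are not guest initiated */
	return kvm_set_msr(vcpu, msr_index, data, false);
}

static inline int kvm_set_msr_guest(struct kvm_vcpu *vcpu, u32 msr_index,
				    u64 data)
{
	/* WRMSR intercepts are the guest-initiated path */
	return kvm_set_msr(vcpu, msr_index, data, true);
}

The origin of the write is then spelled out in the function name at
each call site instead of a bare true/false, but set_msr,
write_tsc_offset, kvm_write_tsc and friends would each need such a
pair, which is why I don't think it scales here.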