On Tue, Jul 31, 2012 at 04:18:41PM +0530, Nikunj A. Dadhania wrote:
> From: Nikunj A. Dadhania <nikunj@xxxxxxxxxxxxxxxxxx>
> 
> Hypervisor code to indicate guest running/pre-empteded status through
> msr. The page is now pinned during MSR write time and use
> kmap_atomic/kunmap_atomic to access the shared area vcpu_state area.
> 
> Suggested-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
> Signed-off-by: Nikunj A. Dadhania <nikunj@xxxxxxxxxxxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |    7 ++++
>  arch/x86/kvm/cpuid.c            |    1 +
>  arch/x86/kvm/x86.c              |   71 ++++++++++++++++++++++++++++++++++++++-
>  3 files changed, 77 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 09155d6..441348f 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -429,6 +429,13 @@ struct kvm_vcpu_arch {
>                  struct kvm_steal_time steal;
>          } st;
>  
> +        /* indicates vcpu is running or preempted */
> +        struct {
> +                u64 msr_val;
> +                struct page *vs_page;
> +                unsigned int vs_offset;
> +        } v_state;
> +
>          u64 last_guest_tsc;
>          u64 last_kernel_ns;
>          u64 last_host_tsc;
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 0595f13..37ab364 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -411,6 +411,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
>                       (1 << KVM_FEATURE_CLOCKSOURCE2) |
>                       (1 << KVM_FEATURE_ASYNC_PF) |
>                       (1 << KVM_FEATURE_PV_EOI) |
> +                     (1 << KVM_FEATURE_VCPU_STATE) |
>                       (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
>  
>          if (sched_info_on())
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 59b5950..580abcf 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -806,13 +806,13 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
>   * kvm-specific. Those are put in the beginning of the list.
>   */
>  
> -#define KVM_SAVE_MSRS_BEGIN     9
> +#define KVM_SAVE_MSRS_BEGIN     10
>  static u32 msrs_to_save[] = {
>          MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
>          MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
>          HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
>          HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
> -        MSR_KVM_PV_EOI_EN,
> +        MSR_KVM_VCPU_STATE, MSR_KVM_PV_EOI_EN,
>          MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
>          MSR_STAR,
>  #ifdef CONFIG_X86_64
> @@ -1557,6 +1557,53 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>                  &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>  
> +static void kvm_set_atomic(u64 *addr, u64 old, u64 new)
> +{
> +        int loop = 1000000;
> +        while (1) {
> +                if (cmpxchg(addr, old, new) == old)
> +                        break;
> +                loop--;
> +                if (!loop) {
> +                        pr_info("atomic cur: %lx old: %lx new: %lx\n",
> +                                *addr, old, new);
> +                        break;
> +                }
> +        }
> +}

A generic "kvm_set_atomic" would need that loop, but in the particular
TLB flush case we know that the only information being transmitted is a
TLB flush. So this idea should work:

old = *addr;
if (cmpxchg(addr, old, IN_GUEST_MODE) != old) {
        /* a flush request came in after we read 'old' */
        kvm_x86_ops->tlb_flush();
        atomic_set(addr, IN_GUEST_MODE);
} else if (old & TLB_SHOULD_FLUSH) {
        kvm_x86_ops->tlb_flush();
}

(the actual pseudocode above is pretty ugly and must be improved, but it
should be enough to transmit the idea). Of course, as long as you make
sure the atomic_set does not overwrite information.

> +        char *kaddr;
> +
> +        if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> +            !vcpu->arch.v_state.vs_page)
> +                return;

If it's not enabled, vs_page should be NULL?
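Coming back to the flush idea above, here is a rough, untested sketch of
how kvm_set_vcpu_state() could look with a single cmpxchg on guest entry.
KVM_VCPU_STATE_SHOULD_FLUSH is a made-up name for the flush request bit;
only KVM_VCPU_STATE_IN_GUEST_MODE exists in the patch as posted:

static void kvm_set_vcpu_state(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_state *vs;
        char *kaddr;
        u64 old;

        if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
            !vcpu->arch.v_state.vs_page)
                return;

        kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
        vs = (struct kvm_vcpu_state *)(kaddr + vcpu->arch.v_state.vs_offset);

        old = vs->state;
        if (cmpxchg(&vs->state, old, 1 << KVM_VCPU_STATE_IN_GUEST_MODE) != old) {
                /*
                 * Lost the race: a flush request was set after 'old' was
                 * read.  Honour it, then mark the vcpu in guest mode.
                 * As said above, this store has to be careful not to lose
                 * a request that arrives after the flush.
                 */
                kvm_x86_ops->tlb_flush(vcpu);
                vs->state = 1 << KVM_VCPU_STATE_IN_GUEST_MODE;
        } else if (old & (1 << KVM_VCPU_STATE_SHOULD_FLUSH)) {
                /* The value we replaced already carried a flush request. */
                kvm_x86_ops->tlb_flush(vcpu);
        }

        kunmap_atomic(kaddr);
}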
> +
> +        kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> +        kaddr += vcpu->arch.v_state.vs_offset;
> +        vs = kaddr;
> +        kvm_set_atomic(&vs->state, 0, 1 << KVM_VCPU_STATE_IN_GUEST_MODE);
> +        kunmap_atomic(kaddr);
> +}
> +
> +static void kvm_clear_vcpu_state(struct kvm_vcpu *vcpu)
> +{
> +        struct kvm_vcpu_state *vs;
> +        char *kaddr;
> +
> +        if (!(vcpu->arch.v_state.msr_val & KVM_MSR_ENABLED) ||
> +            !vcpu->arch.v_state.vs_page)
> +                return;

Like above.

> +        kaddr = kmap_atomic(vcpu->arch.v_state.vs_page);
> +        kaddr += vcpu->arch.v_state.vs_offset;
> +        vs = kaddr;
> +        kvm_set_atomic(&vs->state, 1 << KVM_VCPU_STATE_IN_GUEST_MODE, 0);
> +        kunmap_atomic(kaddr);
> +}
> +
>  int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>  {
>          bool pr = false;
> @@ -1676,6 +1723,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
>                          return 1;
>                  break;
>  
> +        case MSR_KVM_VCPU_STATE:
> +                vcpu->arch.v_state.vs_page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
> +                vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);

Assign vs_offset after success.

> +
> +                if (is_error_page(vcpu->arch.v_state.vs_page)) {
> +                        kvm_release_page_clean(vcpu->arch.time_page);
> +                        vcpu->arch.v_state.vs_page = NULL;
> +                        pr_info("KVM: VCPU_STATE - Unable to pin the page\n");

Missing break or return;

> +                }
> +                vcpu->arch.v_state.msr_val = data;
> +                break;
> +
>          case MSR_IA32_MCG_CTL:

Please verify this code carefully again. Also leaking the page reference.

>          vcpu->arch.apf.msr_val = 0;
>          vcpu->arch.st.msr_val = 0;
> +        vcpu->arch.v_state.msr_val = 0;

Add a newline and comment (or even better a new helper).

> 
>          kvmclock_reset(vcpu);
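Putting the comments on the MSR write path together, this is roughly what
I have in mind (untested sketch, and the helper name below is just a
suggestion). The error path releases the page that was actually looked up
(not time_page) and fails the write instead of falling through:

        case MSR_KVM_VCPU_STATE: {
                struct page *page;

                page = gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
                if (is_error_page(page)) {
                        kvm_release_page_clean(page);
                        pr_info("KVM: VCPU_STATE - Unable to pin the page\n");
                        return 1;
                }

                /* Drop the reference taken by a previous MSR write, if any. */
                if (vcpu->arch.v_state.vs_page)
                        kvm_release_page_dirty(vcpu->arch.v_state.vs_page);

                /* Only update the cached state once the page is pinned. */
                vcpu->arch.v_state.vs_page = page;
                vcpu->arch.v_state.vs_offset = data & ~(PAGE_MASK | KVM_MSR_ENABLED);
                vcpu->arch.v_state.msr_val = data;
                break;
        }

And the reset path quoted above could then call a helper instead of
open-coding the msr_val = 0, which also takes care of unpinning the page:

static void kvm_vcpu_state_reset(struct kvm_vcpu *vcpu)
{
        /* Stop publishing run/preempt state and unpin the shared page. */
        vcpu->arch.v_state.msr_val = 0;
        vcpu->arch.v_state.vs_offset = 0;

        if (vcpu->arch.v_state.vs_page) {
                kvm_release_page_dirty(vcpu->arch.v_state.vs_page);
                vcpu->arch.v_state.vs_page = NULL;
        }
}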