2017-02-15 23:00+0100, Paolo Bonzini:
> From: "Cao, Lei" <Lei.Cao@xxxxxxxxxxx>
>
> Provide versions of struct gfn_to_hva_cache functions that
> take vcpu as a parameter instead of struct kvm. The existing functions
> are not needed anymore, so delete them. This allows dirty pages to
> be logged in the vcpu dirty ring, instead of the global dirty ring,
> for ring-based dirty memory tracking.
>
> Signed-off-by: Lei Cao <lei.cao@xxxxxxxxxxx>
> Message-Id: <CY1PR08MB19929BD2AC47A291FD680E83F04F0@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@xxxxxxxxxx>

>  arch/x86/kvm/lapic.c     | 22 ++++++++++------------
>  arch/x86/kvm/x86.c       | 41 ++++++++++++++++++++---------------------
>  include/linux/kvm_host.h | 16 ++++++++--------
>  virt/kvm/kvm_main.c      | 34 +++++++++++++++++-----------------
>  4 files changed, 55 insertions(+), 58 deletions(-)
>
> diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
> index 9fa5b8164961..acf959441778 100644
> --- a/arch/x86/kvm/lapic.c
> +++ b/arch/x86/kvm/lapic.c
> @@ -529,16 +529,14 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
>
>  static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
>  {
> -
> -        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
> -                        sizeof(val));
> +        return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, &val,
> +                        sizeof(val));
>  }
>
>  static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
>  {
> -
> -        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
> -                        sizeof(*val));
> +        return kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.pv_eoi.data, val,
> +                        sizeof(*val));
>  }
>
>  static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
> @@ -2287,8 +2285,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
>          if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
>                  return;
>
> -        if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
> -                        sizeof(u32)))
> +        if (kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
> +                        sizeof(u32)))
>                  return;
>
>          apic_set_tpr(vcpu->arch.apic, data & 0xff);
> @@ -2340,14 +2338,14 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
>                  max_isr = 0;
>          data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
>
> -        kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
> -                        sizeof(u32));
> +        kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apic->vapic_cache, &data,
> +                        sizeof(u32));
>  }
>
>  int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
>  {
>          if (vapic_addr) {
> -                if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
> +                if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
>                                  &vcpu->arch.apic->vapic_cache,
>                                  vapic_addr, sizeof(u32)))
>                          return -EINVAL;
> @@ -2441,7 +2439,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
>          vcpu->arch.pv_eoi.msr_val = data;
>          if (!pv_eoi_enabled(vcpu))
>                  return 0;
> -        return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
> +        return kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.pv_eoi.data,
>                          addr, sizeof(u8));
>  }
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 2a0974383ffe..8d3047c8cce7 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1811,7 +1811,7 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>          struct kvm_vcpu_arch *vcpu = &v->arch;
>          struct pvclock_vcpu_time_info guest_hv_clock;
>
> -        if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
> +        if (unlikely(kvm_vcpu_read_guest_cached(v, &vcpu->pv_time,
>                          &guest_hv_clock, sizeof(guest_hv_clock))))
>                  return;
>
> @@ -1832,9 +1832,9 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>          BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
>
>          vcpu->hv_clock.version = guest_hv_clock.version + 1;
> -        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -                        &vcpu->hv_clock,
> -                        sizeof(vcpu->hv_clock.version));
> +        kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +                        &vcpu->hv_clock,
> +                        sizeof(vcpu->hv_clock.version));
>
>          smp_wmb();
>
> @@ -1848,16 +1848,16 @@ static void kvm_setup_pvclock_page(struct kvm_vcpu *v)
>
>          trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
>
> -        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -                        &vcpu->hv_clock,
> -                        sizeof(vcpu->hv_clock));
> +        kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +                        &vcpu->hv_clock,
> +                        sizeof(vcpu->hv_clock));
>
>          smp_wmb();
>
>          vcpu->hv_clock.version++;
> -        kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
> -                        &vcpu->hv_clock,
> -                        sizeof(vcpu->hv_clock.version));
> +        kvm_vcpu_write_guest_cached(v, &vcpu->pv_time,
> +                        &vcpu->hv_clock,
> +                        sizeof(vcpu->hv_clock.version));
>  }
>
>  static int kvm_guest_time_update(struct kvm_vcpu *v)
> @@ -2090,7 +2090,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>                  return 0;
>          }
>
> -        if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
> +        if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.apf.data, gpa,
>                          sizeof(u32)))
>                  return 1;
>
> @@ -2109,7 +2109,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>          if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
>                  return;
>
> -        if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +        if (unlikely(kvm_vcpu_read_guest_cached(vcpu, &vcpu->arch.st.stime,
>                          &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
>                  return;
>
> @@ -2120,7 +2120,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>
>          vcpu->arch.st.steal.version += 1;
>
> -        kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +        kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>                          &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>
>          smp_wmb();
> @@ -2129,14 +2129,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>                  vcpu->arch.st.last_steal;
>          vcpu->arch.st.last_steal = current->sched_info.run_delay;
>
> -        kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +        kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>                          &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>
>          smp_wmb();
>
>          vcpu->arch.st.steal.version += 1;
>
> -        kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +        kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.st.stime,
>                          &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
>  }
>
> @@ -2241,7 +2241,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  if (!(data & 1))
>                          break;
>
> -                if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
> +                if (kvm_vcpu_gfn_to_hva_cache_init(vcpu,
>                                  &vcpu->arch.pv_time, data & ~1ULL,
>                                  sizeof(struct pvclock_vcpu_time_info)))
>                          vcpu->arch.pv_time_enabled = false;
> @@ -2262,7 +2262,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                  if (data & KVM_STEAL_RESERVED_MASK)
>                          return 1;
>
> -                if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
> +                if (kvm_vcpu_gfn_to_hva_cache_init(vcpu, &vcpu->arch.st.stime,
>                                  data & KVM_STEAL_VALID_BITS,
>                                  sizeof(struct kvm_steal_time)))
>                          return 1;
> @@ -2876,7 +2876,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
>
>          vcpu->arch.st.steal.preempted = 1;
>
> -        kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
> +        kvm_vcpu_write_guest_offset_cached(vcpu, &vcpu->arch.st.stime,
>                          &vcpu->arch.st.steal.preempted,
>                          offsetof(struct kvm_steal_time, preempted),
>                          sizeof(vcpu->arch.st.steal.preempted));
> @@ -8537,9 +8537,8 @@ static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>
>  static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
>  {
> -
> -        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
> -                        sizeof(val));
> +        return kvm_vcpu_write_guest_cached(vcpu, &vcpu->arch.apf.data, &val,
> +                        sizeof(val));
>  }
>
>  void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index cda457bcedc1..17fa466cd5f4 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -641,18 +641,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
>  int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
>                  unsigned long len);
>  int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
> -int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, unsigned long len);
> +int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +                void *data, unsigned long len);
>  int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
>                  int offset, int len);
>  int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
>                  unsigned long len);
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, unsigned long len);
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, int offset, unsigned long len);
> -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                gpa_t gpa, unsigned long len);
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +                void *data, unsigned long len);
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +                void *data, int offset, unsigned long len);
> +int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
> +                gpa_t gpa, unsigned long len);
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index a83c186cefc1..263a80513ad9 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -1981,18 +1981,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
>          return 0;
>  }
>
> -int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> +int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
>                  gpa_t gpa, unsigned long len)
>  {
> -        struct kvm_memslots *slots = kvm_memslots(kvm);
> +        struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>          return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
>  }
> -EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
>
> -int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, int offset, unsigned long len)
> +int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +                void *data, int offset, unsigned long len)
>  {
> -        struct kvm_memslots *slots = kvm_memslots(kvm);
> +        struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>          int r;
>          gpa_t gpa = ghc->gpa + offset;
>
> @@ -2002,7 +2002,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                  __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>
>          if (unlikely(!ghc->memslot))
> -                return kvm_write_guest(kvm, gpa, data, len);
> +                return kvm_vcpu_write_guest(vcpu, gpa, data, len);
>
>          if (kvm_is_error_hva(ghc->hva))
>                  return -EFAULT;
> @@ -2014,19 +2014,19 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>
>          return 0;
>  }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
>
> -int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, unsigned long len)
> +int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +                void *data, unsigned long len)
>  {
> -        return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
> +        return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
>  }
> -EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
>
> -int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
> -                void *data, unsigned long len)
> +int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
> +                void *data, unsigned long len)
>  {
> -        struct kvm_memslots *slots = kvm_memslots(kvm);
> +        struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
>          int r;
>
>          BUG_ON(len > ghc->len);
>
> @@ -2035,7 +2035,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                  __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
>
>          if (unlikely(!ghc->memslot))
> -                return kvm_read_guest(kvm, ghc->gpa, data, len);
> +                return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
>
>          if (kvm_is_error_hva(ghc->hva))
>                  return -EFAULT;
> @@ -2046,7 +2046,7 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>
>          return 0;
>  }
> -EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
> +EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
>
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
>  {
> --
> 1.8.3.1
>
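For readers skimming the archive, here is a minimal caller-side sketch of the renamed helpers. It is not part of the patch: example_cache_init(), example_cache_write(), my_cache and my_gpa are made-up names for illustration, and only the kvm_vcpu_* calls and their signatures are taken from the diff above.

        #include <linux/kvm_host.h>

        /*
         * Map a guest physical address once; later accesses through the
         * same gfn_to_hva_cache reuse the cached translation.
         */
        static int example_cache_init(struct kvm_vcpu *vcpu,
                        struct gfn_to_hva_cache *my_cache, gpa_t my_gpa)
        {
                return kvm_vcpu_gfn_to_hva_cache_init(vcpu, my_cache, my_gpa,
                                sizeof(u32));
        }

        /*
         * Writes go through the cached mapping; passing the vcpu rather than
         * the kvm pointer is what allows the write to be attributed to that
         * vcpu's dirty ring instead of a global one.
         */
        static int example_cache_write(struct kvm_vcpu *vcpu,
                        struct gfn_to_hva_cache *my_cache, u32 val)
        {
                return kvm_vcpu_write_guest_cached(vcpu, my_cache, &val,
                                sizeof(val));
        }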