David Woodhouse <dwmw2@xxxxxxxxxxxxx> writes:

> From: David Woodhouse <dwmw@xxxxxxxxxxxx>
>
> It shouldn't take a vcpu.
>
> Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
> ---
>  arch/x86/kvm/x86.c       | 8 ++++----
>  include/linux/kvm_host.h | 4 ++--
>  virt/kvm/kvm_main.c      | 8 ++++----
>  3 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e545a8a613b1..c7f1ba21212e 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -2957,7 +2957,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>  		return;
>
>  	/* -EAGAIN is returned in atomic context so we can just return. */
> -	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
> +	if (kvm_map_gfn(vcpu->kvm, vcpu->arch.st.msr_val >> PAGE_SHIFT,
>  			&map, &vcpu->arch.st.cache, false))
>  		return;
>
> @@ -2992,7 +2992,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
>
>  	st->version += 1;
>
> -	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
> +	kvm_unmap_gfn(vcpu->kvm, &map, &vcpu->arch.st.cache, true, false);
>  }
>
>  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> @@ -3981,7 +3981,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
>  	if (vcpu->arch.st.preempted)
>  		return;
>
> -	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
> +	if (kvm_map_gfn(vcpu->kvm, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
>  			&vcpu->arch.st.cache, true))
>  		return;
>
> @@ -3990,7 +3990,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
>
>  	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
>
> -	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
> +	kvm_unmap_gfn(vcpu->kvm, &map, &vcpu->arch.st.cache, true, true);
>  }
>
>  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 7f2e2a09ebbd..8eb5eb1399f5 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -806,11 +806,11 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
>  kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
>  kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
>  int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
> -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
> +int kvm_map_gfn(struct kvm *kvm, gfn_t gfn, struct kvm_host_map *map,
>  		struct gfn_to_pfn_cache *cache, bool atomic);
>  struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
>  void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
> -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
> +int kvm_unmap_gfn(struct kvm *kvm, struct kvm_host_map *map,
>  		struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
>  unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
>  unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 2541a17ff1c4..f01a8df7806a 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -2181,10 +2181,10 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
>  	return 0;
>  }
>
> -int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
> +int kvm_map_gfn(struct kvm *kvm, gfn_t gfn, struct kvm_host_map *map,
>  		struct gfn_to_pfn_cache *cache, bool atomic)
>  {
> -	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
> +	return __kvm_map_gfn(kvm_memslots(kvm), gfn, map,
>  			cache, atomic);
>  }
>  EXPORT_SYMBOL_GPL(kvm_map_gfn);
> @@ -2232,10 +2232,10 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
>  	map->page = NULL;
>  }
>
> -int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
> +int kvm_unmap_gfn(struct kvm *kvm, struct kvm_host_map *map,
>  		struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
>  {
> -	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
> +	__kvm_unmap_gfn(gfn_to_memslot(kvm, map->gfn), map,
>  			cache, dirty, atomic);
>  	return 0;
>  }

What about different address space ids? gfn_to_memslot() now calls
kvm_memslots(), which gives the memslots for address space id = 0, but what
if we want something different?

Note that different vCPUs can (in theory) be in different address spaces, so
we actually need 'vcpu' and not 'kvm' there.

--
Vitaly
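For reference, the distinction Vitaly is pointing at looks roughly like this
(a simplified paraphrase of the helpers in include/linux/kvm_host.h around
this series, not the verbatim source; on x86 the second address space is the
one used for SMM, which is why a vCPU's view can differ from address space 0):

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	/* always address space id 0 */
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	/* the address space this vCPU is currently in (e.g. SMM vs. normal on x86) */
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

So a kvm_map_gfn()/kvm_unmap_gfn() that only takes 'struct kvm *' can only
ever resolve gfns against address space 0.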