From: Paul Durrant <pdurrant@xxxxxxxxxx>

At the moment pages are marked dirty by open-coded calls to
mark_page_dirty_in_slot(), directly dereferencing the gpa and memslot
from the cache. After a subsequent patch these may not always be set,
so add a helper now so that callers are protected from the need to
know about this detail.

NOTE: Pages are now marked dirty while the cache lock is held. This
      is to ensure that gpa and memslot are mutually consistent.

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
Reviewed-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
 arch/x86/kvm/x86.c       |  2 +-
 arch/x86/kvm/xen.c       | 13 ++++++-------
 include/linux/kvm_host.h |  7 +++++++
 virt/kvm/pfncache.c      |  6 ++++++
 4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c9c81e82e65..d669a8801265 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3137,7 +3137,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 
 	guest_hv_clock->version = ++vcpu->hv_clock.version;
 
-	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+	kvm_gpc_mark_dirty(gpc);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
 	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 40edf4d1974c..33fddd29824b 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -430,14 +430,13 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		smp_wmb();
 	}
 
-	if (user_len2)
+	if (user_len2) {
+		kvm_gpc_mark_dirty(gpc2);
 		read_unlock(&gpc2->lock);
+	}
 
+	kvm_gpc_mark_dirty(gpc1);
 	read_unlock_irqrestore(&gpc1->lock, flags);
-
-	mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
-	if (user_len2)
-		mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
 }
 
 void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
@@ -543,13 +542,13 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 			     : "0" (evtchn_pending_sel32));
 		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
 	}
+
+	kvm_gpc_mark_dirty(gpc);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
 	/* For the per-vCPU lapic vector, deliver it as MSI. */
 	if (v->arch.xen.upcall_vector)
 		kvm_xen_inject_vcpu_vector(v);
-
-	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fb6c6109fdca..c71e8fbccaaf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1367,6 +1367,13 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
  */
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
 
+/**
+ * kvm_gpc_mark_dirty - mark a cached page as dirty.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ */
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 0f36acdf577f..b68ed7fa56a2 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -386,6 +386,12 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
+void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
+{
+	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(kvm_gpc_mark_dirty);
+
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
 	struct kvm *kvm = gpc->kvm;
-- 
2.39.2
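
[Not part of the patch: a minimal sketch of the caller pattern the NOTE
describes. The guest page is written and kvm_gpc_mark_dirty() is called
while the cache's read lock is still held, so the gpa and memslot the
helper dereferences cannot change underneath it. The caller function,
struct my_shared_info and its example_field member are hypothetical.]

#include <linux/kvm_host.h>

/* Hypothetical guest-shared layout, for illustration only. */
struct my_shared_info {
	u32 example_field;
};

static void example_write_guest_page(struct gfn_to_pfn_cache *gpc)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(gpc, sizeof(struct my_shared_info))) {
		/* A real caller would refresh the cache and retry. */
		read_unlock_irqrestore(&gpc->lock, flags);
		return;
	}

	/* Write the guest page through the cached kernel mapping. */
	((struct my_shared_info *)gpc->khva)->example_field = 1;

	/* Mark the page dirty before dropping the lock, per the NOTE. */
	kvm_gpc_mark_dirty(gpc);
	read_unlock_irqrestore(&gpc->lock, flags);
}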