From: Sean Christopherson <seanjc@xxxxxxxxxx>

Replace the guest_uses_pa and kernel_map booleans in the PFN cache code
with a unified enum/bitmask. Using explicit names makes it easier to
review and audit call sites.

Opportunistically add a WARN to prevent passing garbage; instantiating a
cache without declaring its usage is either buggy or pointless.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
---
 arch/x86/kvm/xen.c        |  2 +-
 include/linux/kvm_host.h  | 11 +++++------
 include/linux/kvm_types.h | 10 ++++++++--
 virt/kvm/pfncache.c       | 14 +++++++-------
 4 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 4aa0f2b31665..5be1c9227105 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 	}
 
 	do {
-		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
+		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
 						gpa, PAGE_SIZE, false);
 		if (ret)
 			goto out;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f11039944c08..d044e328046a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1222,9 +1222,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @gpc:	   struct gfn_to_pfn_cache object.
  * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
  *		   invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *		   @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @usage:	   indicates if the resulting host physical PFN is used while
+ *		   the @vcpu is IN_GUEST_MODE and/or if the PFN is used directly
+ *		   by KVM (and thus needs a kernel virtual mapping).
  * @gpa:	   guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  * @dirty:	   mark the cache dirty immediately.
@@ -1240,9 +1240,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * to ensure that the cache is valid before accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
-			      bool kernel_map, gpa_t gpa, unsigned long len,
-			      bool dirty);
+			      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+			      gpa_t gpa, unsigned long len, bool dirty);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index dceac12c1ce5..784f37cbf33e 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,6 +18,7 @@ struct kvm_memslots;
 
 enum kvm_mr_change;
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>
 
@@ -46,6 +47,12 @@ typedef u64 hfn_t;
 
 typedef hfn_t kvm_pfn_t;
 
+enum pfn_cache_usage {
+	KVM_GUEST_USES_PFN = BIT(0),
+	KVM_HOST_USES_PFN = BIT(1),
+	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
 struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;
@@ -64,11 +71,10 @@ struct gfn_to_pfn_cache {
 	rwlock_t lock;
 	void *khva;
 	kvm_pfn_t pfn;
+	enum pfn_cache_usage usage;
 	bool active;
 	bool valid;
 	bool dirty;
-	bool kernel_map;
-	bool guest_uses_pa;
 };
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index ce878f4be4da..9b3a192cb18c 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 		 * If a guest vCPU could be using the physical address,
 		 * it needs to be woken.
 		 */
-		if (gpc->guest_uses_pa) {
+		if (gpc->usage & KVM_GUEST_USES_PFN) {
 			if (!wake_vcpus) {
 				wake_vcpus = true;
 				bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
@@ -219,7 +219,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 			goto map_done;
 		}
 
-		if (gpc->kernel_map) {
+		if (gpc->usage & KVM_HOST_USES_PFN) {
 			if (new_pfn == old_pfn) {
 				new_khva = old_khva;
 				old_pfn = KVM_PFN_ERR_FAULT;
@@ -299,10 +299,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
 
 
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
-			      bool kernel_map, gpa_t gpa, unsigned long len,
-			      bool dirty)
+			      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+			      gpa_t gpa, unsigned long len, bool dirty)
 {
+	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
 	if (!gpc->active) {
 		rwlock_init(&gpc->lock);
 
@@ -310,8 +311,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->pfn = KVM_PFN_ERR_FAULT;
 		gpc->uhva = KVM_HVA_ERR_BAD;
 		gpc->vcpu = vcpu;
-		gpc->kernel_map = kernel_map;
-		gpc->guest_uses_pa = guest_uses_pa;
+		gpc->usage = usage;
 		gpc->valid = false;
 		gpc->active = true;
-- 
2.33.1
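
[Editor's note: for reviewers who want to poke at the new validation outside the
kernel, below is a minimal userspace sketch of the check the added
WARN_ON_ONCE() performs. It is illustration only, not part of the patch:
BIT() is open-coded, the WARN is reduced to a hypothetical usage_is_valid()
helper, and the enumerator values mirror the ones added to
include/linux/kvm_types.h.]

#include <assert.h>
#include <stdio.h>

/* Mirrors the enum added to include/linux/kvm_types.h; BIT(n) is 1UL << n. */
enum pfn_cache_usage {
	KVM_GUEST_USES_PFN = 1 << 0,
	KVM_HOST_USES_PFN = 1 << 1,
	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
};

/*
 * Stand-in for the WARN_ON_ONCE() condition in kvm_gfn_to_pfn_cache_init():
 * reject an empty usage (a cache nobody declared a user for is pointless)
 * and any set bit outside the two defined flags (garbage).
 */
static int usage_is_valid(enum pfn_cache_usage usage)
{
	return usage && (usage & KVM_GUEST_AND_HOST_USE_PFN) == usage;
}

int main(void)
{
	assert(usage_is_valid(KVM_HOST_USES_PFN));          /* e.g. Xen shinfo */
	assert(usage_is_valid(KVM_GUEST_AND_HOST_USE_PFN)); /* both users */
	assert(!usage_is_valid(0));                         /* usage undeclared */
	assert(!usage_is_valid(1 << 2));                    /* garbage bit */
	printf("usage validation matches what the WARN expects\n");
	return 0;
}

[The bitmask form is what lets call sites like the xen.c hunk above say
KVM_HOST_USES_PFN explicitly instead of an opaque "false, true" pair, and
lets pfncache.c test each concern independently with "gpc->usage & ...".]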