On Tue, Feb 27, 2024, David Woodhouse wrote:
>  static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
>  			      unsigned long len)
>  {
>  	struct kvm *kvm = gpc->kvm;
> +	int ret;
> +
> +	mutex_lock(&gpc->refresh_lock);
>
>  	if (!gpc->active) {
>  		if (KVM_BUG_ON(gpc->valid, kvm))

Heh, it's _just_ out of sight in this diff, but this has an error path that
misses the unlock.  The full context is:

	if (!gpc->active) {
		if (KVM_BUG_ON(gpc->valid, kvm))
			return -EIO;

At the risk of doing too much when applying, I think this is the perfect
patch to start introducing use of guard(mutex) in common KVM.  As a bonus,
it's a noticeably smaller diff.

Testing this now...

Signed-off-by: David Woodhouse <dwmw@xxxxxxxxxxxx>
Reviewed-by: Paul Durrant <paul@xxxxxxx>
Link: https://lore.kernel.org/r/20240227115648.3104-5-dwmw2@xxxxxxxxxxxxx
[sean: use guard(mutex) to fix a missed unlock]
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 virt/kvm/pfncache.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 9ac8c9da4eda..4e07112a24c2 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -256,12 +256,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 	if (page_offset + len > PAGE_SIZE)
 		return -EINVAL;
 
-	/*
-	 * If another task is refreshing the cache, wait for it to complete.
-	 * There is no guarantee that concurrent refreshes will see the same
-	 * gpa, memslots generation, etc..., so they must be fully serialized.
-	 */
-	mutex_lock(&gpc->refresh_lock);
+	lockdep_assert_held(&gpc->refresh_lock);
 
 	write_lock_irq(&gpc->lock);
 
@@ -347,8 +342,6 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 out_unlock:
 	write_unlock_irq(&gpc->lock);
 
-	mutex_unlock(&gpc->refresh_lock);
-
 	if (unmap_old)
 		gpc_unmap(old_pfn, old_khva);
 
@@ -357,13 +350,16 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned l
 
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
+	unsigned long uhva;
+
+	guard(mutex)(&gpc->refresh_lock);
+
 	/*
 	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
 	 * or HVA-based, not both.  For GPA-based caches, the HVA will be
 	 * recomputed during refresh if necessary.
 	 */
-	unsigned long uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva :
-							  KVM_HVA_ERR_BAD;
+	uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva : KVM_HVA_ERR_BAD;
 
 	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
 }
@@ -377,6 +373,7 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 	gpc->pfn = KVM_PFN_ERR_FAULT;
 	gpc->gpa = INVALID_GPA;
 	gpc->uhva = KVM_HVA_ERR_BAD;
+	gpc->active = gpc->valid = false;
 }
 
 static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
@@ -384,6 +381,8 @@ static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned
 {
 	struct kvm *kvm = gpc->kvm;
 
+	guard(mutex)(&gpc->refresh_lock);
+
 	if (!gpc->active) {
 		if (KVM_BUG_ON(gpc->valid, kvm))
 			return -EIO;
@@ -420,6 +419,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 	kvm_pfn_t old_pfn;
 	void *old_khva;
 
+	guard(mutex)(&gpc->refresh_lock);
+
 	if (gpc->active) {
 		/*
 		 * Deactivate the cache before removing it from the list, KVM

base-commit: 6d6f106db109251010423d8728d76a0260db5814
--
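
P.S. For anyone who hasn't bumped into guard(mutex) yet: it's the scope-based
cleanup helper from <linux/cleanup.h>.  The lock is taken where the guard is
declared and dropped automatically when it goes out of scope, which is exactly
why the -EIO early return above can no longer leak refresh_lock.  Below is a
minimal userspace sketch of the underlying __attribute__((cleanup)) mechanism;
the demo_* names and the activate() stand-in are made up for illustration, not
the kernel's actual implementation.

/* Build with: gcc -Wall demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Invoked by the compiler when the guard variable goes out of scope. */
static void demo_unlock(pthread_mutex_t **lock)
{
	pthread_mutex_unlock(*lock);
}

/*
 * Take the lock and arrange for the unlock to run on every exit path,
 * early returns included.  Roughly what guard(mutex) boils down to via
 * DEFINE_GUARD() in <linux/cleanup.h>.
 */
#define demo_guard(lock)						\
	pthread_mutex_t *guard_						\
	__attribute__((cleanup(demo_unlock), unused)) =			\
		(pthread_mutex_lock(lock), (lock))

/* Stand-in for __kvm_gpc_activate()'s early-exit pattern. */
static int activate(int bug)
{
	demo_guard(&demo_lock);

	if (bug)
		return -5;	/* early return: the unlock still runs */

	return 0;
}

int main(void)
{
	printf("%d\n", activate(1));
	/* Without the cleanup, this second call would deadlock. */
	printf("%d\n", activate(0));
	return 0;
}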