On Tue, 2023-11-21 at 18:02 +0000, Paul Durrant wrote:
> -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
> +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, u64 addr, bool addr_is_gpa,
>                               unsigned long len)
>  {
>         struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
> -       unsigned long page_offset = offset_in_page(gpa);
> +       unsigned long page_offset = offset_in_page(addr);
>         bool unmap_old = false;
>         kvm_pfn_t old_pfn;
>         bool hva_change = false;
> @@ -244,12 +244,21 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
>         old_pfn = gpc->pfn;
>         old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
> 
> -       /* If the userspace HVA is invalid, refresh that first */
> -       if (gpc->gpa != gpa || gpc->generation != slots->generation ||
> -           kvm_is_error_hva(gpc->uhva)) {
> -               gfn_t gfn = gpa_to_gfn(gpa);
> +       if (!addr_is_gpa) {
> +               gpc->gpa = KVM_XEN_INVALID_GPA;
> +               gpc->uhva = PAGE_ALIGN_DOWN(gpc->uhva);
> +               addr = PAGE_ALIGN_DOWN(addr);
> +
> +               if (gpc->uhva != addr) {
> +                       gpc->uhva = addr;
> +                       hva_change = true;
> +               }
> +       } else if (gpc->gpa != addr ||
> +                  gpc->generation != slots->generation ||
> +                  kvm_is_error_hva(gpc->uhva)) {
> +               gfn_t gfn = gpa_to_gfn(addr);
> 
> -               gpc->gpa = gpa;
> +               gpc->gpa = addr;
>                 gpc->generation = slots->generation;
>                 gpc->memslot = __gfn_to_memslot(slots, gfn);
>                 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

Hrm, now that a previous patch means we're preserving the low bits of
gpc->uhva, surely you don't *need* to mess with the gpc struct?

If gpc->gpa == KVM_XEN_INVALID_GPA (but gpc->uhva != KVM_HVA_ERR_BAD &&
gpc->active) surely that's enough to signal that gpc->uhva is canonical
and doesn't need to be looked up from the GPA?

And I think that means the 'bool addr_is_gpa' argument can go away from
__kvm_gpc_refresh(); you can set it up in {__,}kvm_gpc_activate*()
instead?
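To sketch what I mean (untested and purely illustrative; the
kvm_gpc_activate_hva() name/signature is made up for the example, the
rest of the refresh and activate paths is elided with "..."):

	/* In __kvm_gpc_refresh(), keyed purely off the gpc state: */
	if (gpc->gpa == KVM_XEN_INVALID_GPA && !kvm_is_error_hva(gpc->uhva)) {
		/*
		 * The cache was activated with a fixed userspace HVA, so
		 * gpc->uhva is already canonical; there is no GPA to
		 * translate and no memslot generation to worry about.
		 */
	} else if (gpc->gpa != gpa || gpc->generation != slots->generation ||
		   kvm_is_error_hva(gpc->uhva)) {
		gfn_t gfn = gpa_to_gfn(gpa);

		gpc->gpa = gpa;
		gpc->generation = slots->generation;
		gpc->memslot = __gfn_to_memslot(slots, gfn);
		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
		...
	}

	/* ... with the activation side recording the fixed HVA up front: */
	int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc,
				 unsigned long uhva, unsigned long len)
	{
		/*
		 * Leave gpc->gpa invalid and store the HVA directly, so the
		 * refresh path above can tell the two cases apart without
		 * an addr_is_gpa flag.  (Plumbing into the common activate
		 * path is hand-waved here.)
		 */
		gpc->gpa = KVM_XEN_INVALID_GPA;
		gpc->uhva = uhva;
		...
	}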