On Sun, May 16, 2021, Maciej S. Szmigiero wrote: > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h > index 8895b95b6a22..3c40c7d32f7e 100644 > --- a/include/linux/kvm_host.h > +++ b/include/linux/kvm_host.h > @@ -1091,10 +1091,14 @@ bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); > * gfn_to_memslot() itself isn't here as an inline because that would > * bloat other code too much. > * > + * With "approx" set returns the memslot also when the address falls > + * in a hole. In that case one of the memslots bordering the hole is > + * returned. > + * > * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! > */ > static inline struct kvm_memory_slot * > -search_memslots(struct kvm_memslots *slots, gfn_t gfn) > +search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx) An alternative to modifying the PPC code would be to make the existing search_memslots() a wrapper around __search_memslots(), with the latter taking @approx. We might also want to make this __always_inline to improve the odds that the compiler optimizes away @approx. I doubt it matters in practice... > { > int start = 0, end = slots->used_slots; > int slot = atomic_read(&slots->lru_slot); > @@ -1116,19 +1120,22 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) > start = slot + 1; > } > > + if (approx && start >= slots->used_slots) > + return &memslots[slots->used_slots - 1]; > + > if (start < slots->used_slots && gfn >= memslots[start].base_gfn && > gfn < memslots[start].base_gfn + memslots[start].npages) { > atomic_set(&slots->lru_slot, start); > return &memslots[start]; > } > > - return NULL; > + return approx ? &memslots[start] : NULL; > } > > static inline struct kvm_memory_slot * > __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) > { > - return search_memslots(slots, gfn); > + return search_memslots(slots, gfn, false); > } > > static inline unsigned long