On 12/08/21 07:07, Sean Christopherson wrote:
> @@ -739,8 +749,16 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>  			  gfn_t start, gfn_t end, bool can_yield, bool flush,
>  			  bool shared)
>  {
> +	bool zap_all = (end == ZAP_ALL_END);
>  	struct tdp_iter iter;
>  
> +	/*
> +	 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
> +	 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
> +	 * and so KVM will never install a SPTE for such addresses.
> +	 */
> +	end = min(end, 1ULL << (shadow_phys_bits - PAGE_SHIFT));

Then zap_all does not need any magic value; callers can just use 0/-1ull,
which is readable enough. ZAP_ALL_END is also unnecessary here if you do:

	gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
	bool zap_all = (start == 0 && end >= max_gfn_host);

	end = min(end, max_gfn_host);
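
(For example, with 46 host physical address bits and 4KiB pages, max_gfn_host
comes out to 1ULL << 34.) Putting that suggestion together with the rest of
the patch, the top of zap_gfn_range() would then read roughly as follows; an
untested sketch that keeps the patch's comment as-is, with callers passing
0 and -1ull instead of ZAP_ALL_END:

	static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
				  gfn_t start, gfn_t end, bool can_yield, bool flush,
				  bool shared)
	{
		/* Exclusive upper bound on GFNs the host can map (host.MAXPHYADDR). */
		gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
		/* A "zap everything" request is simply the full possible range. */
		bool zap_all = (start == 0 && end >= max_gfn_host);
		struct tdp_iter iter;

		/*
		 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
		 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
		 * and so KVM will never install a SPTE for such addresses.
		 */
		end = min(end, max_gfn_host);

		kvm_lockdep_assert_mmu_lock_held(kvm, shared);
		...
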
And as a small commit message nit, I would say "don't leak" instead of
"don't skip", since that's really the effect.
Paolo

>  	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
>  
>  	rcu_read_lock();
> @@ -759,9 +777,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
>  		/*
>  		 * If this is a non-last-level SPTE that covers a larger range
>  		 * than should be zapped, continue, and zap the mappings at a
> -		 * lower level.
> +		 * lower level, except when zapping all SPTEs.
>  		 */
> -		if ((iter.gfn < start ||
> +		if (!zap_all &&
> +		    (iter.gfn < start ||
>  		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
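
With zap_all short-circuiting the range check, the full condition presumably
ends up looking like the sketch below; the tail of the statement is trimmed
from the quoted hunk, so the !is_last_spte() part is assumed from the existing
code rather than taken from the patch:

		if (!zap_all &&
		    (iter.gfn < start ||
		     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
		    !is_last_spte(iter.old_spte, iter.level)) /* assumed unchanged */
			continue;

That is, when zapping all SPTEs the walk no longer skips non-last-level SPTEs
just because they span more than the requested range, so their page table
pages are actually torn down rather than left behind, which matches the
"don't leak" wording suggested above.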