On Thu, Apr 08, 2021, Paolo Bonzini wrote:
> On 08/04/21 17:48, Sean Christopherson wrote:
> > Freaking PDPTRs. I was really hoping we could keep the lock and
> > pages_available() logic outside of the helpers. What if kvm_mmu_load()
> > reads the PDPTRs and passes them into mmu_alloc_shadow_roots()? Or is
> > that too ugly?
>
> The patch I have posted (though untested) tries to do that in a slightly
> less ugly way by pushing make_mmu_pages_available down to
> mmu_alloc_*_roots.

Yeah, I agree it's less ugly. It would be nice to not duplicate that code,
but it's probably not worth the ugliness. :-/

For your approach, can we put the out label after the success path? Setting
mmu->root_pgd isn't wrong per se, but doing so might mislead future readers
into thinking that it's functionally necessary.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index efb41f31e80a..93f97d0a9e2e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3244,6 +3244,13 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 	u8 shadow_root_level = mmu->shadow_root_level;
 	hpa_t root;
 	unsigned i;
+	int r;
+
+	write_lock(&vcpu->kvm->mmu_lock);
+
+	r = make_mmu_pages_available(vcpu);
+	if (r)
+		goto out_unlock;
 
 	if (is_tdp_mmu_enabled(vcpu->kvm)) {
 		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
@@ -3252,8 +3259,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
 		mmu->root_hpa = root;
 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
-		if (WARN_ON_ONCE(!mmu->pae_root))
-			return -EIO;
+		if (WARN_ON_ONCE(!mmu->pae_root)) {
+			r = -EIO;
+			goto out_unlock;
+		}
 
 		for (i = 0; i < 4; ++i) {
 			WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
@@ -3266,13 +3275,15 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		mmu->root_hpa = __pa(mmu->pae_root);
 	} else {
 		WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
-		return -EIO;
+		r = -EIO;
+		goto out_unlock;
 	}
 
 	/* root_pgd is ignored for direct MMUs. */
 	mmu->root_pgd = 0;
-
-	return 0;
+out_unlock:
+	write_unlock(&vcpu->kvm->mmu_lock);
+	return r;
 }
 
 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
@@ -3281,7 +3292,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	u64 pdptrs[4], pm_mask;
 	gfn_t root_gfn, root_pgd;
 	hpa_t root;
-	int i;
+	int i, r;
 
 	root_pgd = mmu->get_guest_pgd(vcpu);
 	root_gfn = root_pgd >> PAGE_SHIFT;
@@ -3289,6 +3300,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	if (mmu_check_root(vcpu, root_gfn))
 		return 1;
 
+	/*
+	 * On SVM, reading PDPTRs might access guest memory, which might fault
+	 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
+	 */
 	if (mmu->root_level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
@@ -3300,6 +3315,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	write_lock(&vcpu->kvm->mmu_lock);
+
+	r = make_mmu_pages_available(vcpu);
+	if (r)
+		goto out_unlock;
+
 	/*
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
@@ -3311,8 +3332,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		goto set_root_pgd;
 	}
 
-	if (WARN_ON_ONCE(!mmu->pae_root))
-		return -EIO;
+	if (WARN_ON_ONCE(!mmu->pae_root)) {
+		r = -EIO;
+		goto out_unlock;
+	}
 
 	/*
 	 * We shadow a 32 bit page table. This may be a legacy 2-level
@@ -3323,8 +3346,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
-		if (WARN_ON_ONCE(!mmu->lm_root))
-			return -EIO;
+		if (WARN_ON_ONCE(!mmu->lm_root)) {
+			r = -EIO;
+			goto out_unlock;
+		}
 
 		mmu->lm_root[0] = __pa(mmu->pae_root) | pm_mask;
 	}
@@ -3352,8 +3377,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 
 set_root_pgd:
 	mmu->root_pgd = root_pgd;
-
-	return 0;
+out_unlock:
+	write_unlock(&vcpu->kvm->mmu_lock);
+	return r;
 }
 
 static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
@@ -4852,14 +4878,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	r = mmu_alloc_special_roots(vcpu);
 	if (r)
 		goto out;
-	write_lock(&vcpu->kvm->mmu_lock);
-	if (make_mmu_pages_available(vcpu))
-		r = -ENOSPC;
-	else if (vcpu->arch.mmu->direct_map)
+	if (vcpu->arch.mmu->direct_map)
 		r = mmu_alloc_direct_roots(vcpu);
 	else
 		r = mmu_alloc_shadow_roots(vcpu);
-	write_unlock(&vcpu->kvm->mmu_lock);
 	if (r)
 		goto out;
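
For completeness, the kvm_mmu_load() variant I was picturing would look
roughly like the below. Completely untested sketch, and the extra "pdptrs"
parameter to mmu_alloc_shadow_roots() is hypothetical; that prototype change
is the part I suspect counts as "too ugly".

	/*
	 * Untested sketch, not a real patch.  This sits in the middle of
	 * kvm_mmu_load() (r is already declared there): read the PDPTRs
	 * before taking mmu_lock, since mmu->get_pdptr() may touch guest
	 * memory and thus sleep, then hand them to the helper so the lock
	 * and make_mmu_pages_available() stay in kvm_mmu_load().  The
	 * "u64 *pdptrs" parameter to mmu_alloc_shadow_roots() does not
	 * exist today.
	 */
	u64 pdptrs[4];
	int i;

	/* PDPTRs are only consumed when shadowing a PAE guest. */
	if (!vcpu->arch.mmu->direct_map &&
	    vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i)
			pdptrs[i] = vcpu->arch.mmu->get_pdptr(vcpu, i);
	}

	write_lock(&vcpu->kvm->mmu_lock);
	if (make_mmu_pages_available(vcpu))
		r = -ENOSPC;
	else if (vcpu->arch.mmu->direct_map)
		r = mmu_alloc_direct_roots(vcpu);
	else
		r = mmu_alloc_shadow_roots(vcpu, pdptrs);
	write_unlock(&vcpu->kvm->mmu_lock);

That keeps the locking and pages_available() logic in one place, but the cost
is plumbing pdptrs through a helper that only needs it for PAE roots, which
is why pushing the lock down into mmu_alloc_*_roots() is probably the lesser
evil.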