From: Yulei Zhang <yuleixzhang@xxxxxxxxxxx>

Signed-off-by: Yulei Zhang <yuleixzhang@xxxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f963a3b0500f..bad01f66983d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1775,6 +1775,9 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 	int i;
 	bool write_protected = false;
 
+	if (kvm->arch.global_root_hpa)
+		return write_protected;
+
 	for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
@@ -5835,6 +5838,9 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
  */
 static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 {
+	if (kvm->arch.global_root_hpa)
+		return;
+
 	lockdep_assert_held(&kvm->slots_lock);
 
 	spin_lock(&kvm->mmu_lock);
@@ -5897,6 +5903,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	struct kvm_memory_slot *memslot;
 	int i;
 
+	if (kvm->arch.global_root_hpa)
+		return;
+
 	spin_lock(&kvm->mmu_lock);
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
 		slots = __kvm_memslots(kvm, i);
-- 
2.17.1