If the TDP MMU is in use, wait to allocate the rmaps until the shadow
MMU is actually used (i.e., a nested VM is launched). This saves memory
equal to 0.2% of guest memory in cases where the TDP MMU is used and
there are no nested guests.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/mmu/mmu.c          | 14 ++++++++++---
 arch/x86/kvm/mmu/tdp_mmu.c      |  6 ++++--
 arch/x86/kvm/mmu/tdp_mmu.h      |  4 ++--
 arch/x86/kvm/x86.c              | 37 ++++++++++++++++++++++++++++++++-
 5 files changed, 54 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00065f9bbc5e..7b8e1532fb55 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1860,5 +1860,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 int kvm_cpu_dirty_log_size(void);
 
 inline bool kvm_memslots_have_rmaps(struct kvm *kvm);
+int alloc_all_memslots_rmaps(struct kvm *kvm);
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 48067c572c02..e3a3b65829c5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3306,6 +3306,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		}
 	}
 
+	r = alloc_all_memslots_rmaps(vcpu->kvm);
+	if (r)
+		return r;
+
 	write_lock(&vcpu->kvm->mmu_lock);
 	r = make_mmu_pages_available(vcpu);
 	if (r < 0)
@@ -5494,9 +5498,13 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
 	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
-	kvm_mmu_init_tdp_mmu(kvm);
-
-	kvm->arch.memslots_have_rmaps = true;
+	if (!kvm_mmu_init_tdp_mmu(kvm))
+		/*
+		 * No smp_load/store wrappers needed here as we are in
+		 * VM init and there cannot be any memslots / other threads
+		 * accessing this struct kvm yet.
+		 */
+		kvm->arch.memslots_have_rmaps = true;
 
 	node->track_write = kvm_mmu_pte_write;
 	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 83cbdbe5de5a..5342aca2c8e0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,10 +14,10 @@ static bool __read_mostly tdp_mmu_enabled = false;
 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
 
 /* Initializes the TDP MMU for the VM, if enabled. */
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 {
 	if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
-		return;
+		return false;
 
 	/* This should not be changed for the lifetime of the VM. */
 	kvm->arch.tdp_mmu_enabled = true;
@@ -25,6 +25,8 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
 	spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
 	INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+
+	return true;
 }
 
 static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 5fdf63090451..b046ab5137a1 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -80,12 +80,12 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);
 
 #ifdef CONFIG_X86_64
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
 static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
 #else
-static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
+static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
 static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
 static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1098ab73a704..95e74fb9fc20 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10868,9 +10868,44 @@ static int alloc_memslot_rmap(struct kvm_memory_slot *slot,
 	return -ENOMEM;
 }
 
+int alloc_all_memslots_rmaps(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;
+	int r = 0;
+	int i;
+
+	if (kvm_memslots_have_rmaps(kvm))
+		return 0;
+
+	mutex_lock(&kvm->slots_arch_lock);
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(slot, slots) {
+			r = alloc_memslot_rmap(slot, slot->npages);
+			if (r) {
+				mutex_unlock(&kvm->slots_arch_lock);
+				return r;
+			}
+		}
+	}
+
+	/*
+	 * memslots_have_rmaps is set and read in different lock contexts,
+	 * so protect it with smp_load/store.
+	 */
+	smp_store_release(&kvm->arch.memslots_have_rmaps, true);
+	mutex_unlock(&kvm->slots_arch_lock);
+	return 0;
+}
+
 bool kvm_memslots_have_rmaps(struct kvm *kvm)
 {
-	return kvm->arch.memslots_have_rmaps;
+	/*
+	 * memslots_have_rmaps is set and read in different lock contexts,
+	 * so protect it with smp_load/store.
+	 */
+	return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
 }
 
 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
-- 
2.31.1.607.g51e8a6a459-goog
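A note on the barrier pairing above, for readers less familiar with it:
the smp_store_release() in alloc_all_memslots_rmaps() pairs with the
smp_load_acquire() in kvm_memslots_have_rmaps(), so any thread that
observes memslots_have_rmaps == true is also guaranteed to see the rmap
allocations published before the store. A minimal userspace sketch of
the same publication pattern, using C11 atomics in place of the kernel
primitives (all names below are illustrative, not part of this patch):

	/*
	 * Standalone sketch of release/acquire publication; C11 atomics
	 * stand in for the kernel's smp_store_release()/smp_load_acquire().
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct fake_slots {
		int *rmap;		/* plays the role of the rmap arrays */
	};

	static struct fake_slots slots;
	static atomic_bool have_rmaps;	/* like memslots_have_rmaps */

	/* Writer: fill in the data, then publish the flag with release order. */
	static void publish_rmaps(int *rmap)
	{
		slots.rmap = rmap;
		atomic_store_explicit(&have_rmaps, true, memory_order_release);
	}

	/*
	 * Reader: if the acquire load of the flag observes true, the rmap
	 * store made in publish_rmaps() is guaranteed to be visible too.
	 */
	static int *get_rmaps(void)
	{
		if (atomic_load_explicit(&have_rmaps, memory_order_acquire))
			return slots.rmap;
		return NULL;
	}

The rmap writes cannot be reordered past the release store, and reads
after the acquire load cannot be hoisted above it, which is why a reader
that sees the flag set never sees half-initialized arrays.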