When private and shared memory are mixed within a large page, lpage_info
may no longer be accurate and should be updated to reflect the mix. A
large page that contains mixed pages can't actually be mapped as a large
page, since its private and shared pages come from different physical
memory.

This patch updates lpage_info when the private/shared memory attribute
is changed. If both private and shared pages are present within a large
page region, it can't be mapped as a large page. Since it's challenging
to track the mixed state in a 'count'-like variable, this patch instead
reserves a bit in disallow_lpage to indicate that a large page includes
mixed private/shared pages.

Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |   8 +++
 arch/x86/kvm/mmu/mmu.c          | 119 +++++++++++++++++++++++++++++++-
 arch/x86/kvm/x86.c              |   2 +
 include/linux/kvm_host.h        |  17 +++++
 virt/kvm/kvm_main.c             |  11 ++-
 5 files changed, 154 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index cfad6ba1a70a..85119ed9527a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -38,6 +38,7 @@
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 #define __KVM_HAVE_ZAP_GFN_RANGE
+#define __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
 
 #define KVM_MAX_VCPUS 1024
 
@@ -945,6 +946,13 @@ struct kvm_vcpu_arch {
 #endif
 };
 
+/*
+ * Use a bit in disallow_lpage to indicate private/shared pages mixed at the
+ * level. The remaining bits will be used as a reference count for other users.
+ */
+#define KVM_LPAGE_PRIVATE_SHARED_MIXED	(1U << 31)
+#define KVM_LPAGE_COUNT_MAX		((1U << 31) - 1)
+
 struct kvm_lpage_info {
 	int disallow_lpage;
 };
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 08abad4f3e6f..a0f198cede3d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -762,11 +762,16 @@ static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
 					    gfn_t gfn, int count)
 {
 	struct kvm_lpage_info *linfo;
 	int i;
+	int disallow_count;
 
 	for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		linfo = lpage_info_slot(gfn, slot, i);
+
+		disallow_count = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;
+		WARN_ON(disallow_count + count < 0 ||
+			disallow_count > KVM_LPAGE_COUNT_MAX - count);
+
 		linfo->disallow_lpage += count;
-		WARN_ON(linfo->disallow_lpage < 0);
 	}
 }
 
@@ -6894,3 +6899,115 @@ void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
 	if (kvm->arch.nx_lpage_recovery_thread)
 		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
 }
+
+static bool mem_attr_is_mixed(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	XA_STATE(xas, &kvm->mem_attr_array, start);
+	gfn_t gfn = start;
+	void *entry;
+	bool shared, private;
+	bool mixed = false;
+
+	if (attr == KVM_MEM_ATTR_SHARED) {
+		shared = true;
+		private = false;
+	} else {
+		shared = false;
+		private = true;
+	}
+
+	rcu_read_lock();
+	entry = xas_load(&xas);
+	while (gfn < end) {
+		if (xas_retry(&xas, entry))
+			continue;
+
+		KVM_BUG_ON(gfn != xas.xa_index, kvm);
+
+		if (entry)
+			private = true;
+		else
+			shared = true;
+
+		if (private && shared) {
+			mixed = true;
+			goto out;
+		}
+
+		entry = xas_next(&xas);
+		gfn++;
+	}
+out:
+	rcu_read_unlock();
+	return mixed;
+}
+
+static inline void update_mixed(struct kvm_lpage_info *linfo, bool mixed)
+{
+	if (mixed)
+		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
+	else
+		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
+}
+
+static void update_mem_lpage_info(struct kvm *kvm,
+				  struct kvm_memory_slot *slot,
+				  unsigned int attr,
+				  gfn_t start, gfn_t end)
+{
+	unsigned long lpage_start, lpage_end;
+	unsigned long gfn, pages, mask;
+	int level;
+
+	for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
+		pages = KVM_PAGES_PER_HPAGE(level);
+		mask = ~(pages - 1);
+		lpage_start = start & mask;
+		lpage_end = (end - 1) & mask;
+
+		/*
+		 * We only need to scan the head and tail pages; the middle
+		 * pages are known not to be mixed.
+		 */
+		update_mixed(lpage_info_slot(lpage_start, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_start,
+					       lpage_start + pages));
+
+		if (lpage_start == lpage_end)
+			return;
+
+		for (gfn = lpage_start + pages; gfn < lpage_end; gfn += pages)
+			update_mixed(lpage_info_slot(gfn, slot, level), false);
+
+		update_mixed(lpage_info_slot(lpage_end, slot, level),
+			     mem_attr_is_mixed(kvm, attr, lpage_end,
+					       lpage_end + pages));
+	}
+}
+
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end)
+{
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	struct kvm_memslot_iter iter;
+	int i;
+
+	WARN_ONCE(!(attr & (KVM_MEM_ATTR_PRIVATE | KVM_MEM_ATTR_SHARED)),
+		  "Unsupported mem attribute.\n");
+
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+
+		kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
+			slot = iter.slot;
+			start = max(start, slot->base_gfn);
+			end = min(end, slot->base_gfn + slot->npages);
+			if (WARN_ON_ONCE(start >= end))
+				continue;
+
+			update_mem_lpage_info(kvm, slot, attr, start, end);
+		}
+	}
+}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 081f62ccc9a1..ef11cda6f13f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12321,6 +12321,8 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
 			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
+		if (kvm_slot_can_be_private(slot))
+			ugfn |= slot->private_offset >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, disable large page support for this slot.
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d65690cae80b..fd36ce6597ad 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2277,4 +2277,21 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 /* Max number of entries allowed for each kvm dirty ring */
 #define KVM_DIRTY_RING_MAX_ENTRIES  65536
 
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+
+#define KVM_MEM_ATTR_SHARED	0x0001
+#define KVM_MEM_ATTR_PRIVATE	0x0002
+
+#ifdef __KVM_HAVE_ARCH_UPDATE_MEM_ATTR
+void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+			      gfn_t start, gfn_t end);
+#else
+static inline void kvm_arch_update_mem_attr(struct kvm *kvm, unsigned int attr,
+					    gfn_t start, gfn_t end)
+{
+}
+#endif
+
+#endif /* CONFIG_HAVE_KVM_PRIVATE_MEM */
+
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index de5cce8c82c7..97d893f7482c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -938,13 +938,13 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)
 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
 
 #ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
-#define KVM_MEM_ATTR_SHARED	0x0001
 static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 				     bool is_private)
 {
 	gfn_t start, end;
 	unsigned long index;
 	void *entry;
+	int attr;
 	int r;
 
 	if (size == 0 || gpa + size < gpa)
@@ -959,7 +959,13 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	 * Guest memory defaults to private, kvm->mem_attr_array only stores
 	 * shared memory.
 	 */
-	entry = is_private ? NULL : xa_mk_value(KVM_MEM_ATTR_SHARED);
+	if (is_private) {
+		attr = KVM_MEM_ATTR_PRIVATE;
+		entry = NULL;
+	} else {
+		attr = KVM_MEM_ATTR_SHARED;
+		entry = xa_mk_value(KVM_MEM_ATTR_SHARED);
+	}
 
 	for (index = start; index < end; index++) {
 		r = xa_err(xa_store(&kvm->mem_attr_array, index, entry,
@@ -969,6 +975,7 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
 	}
 
 	kvm_zap_gfn_range(kvm, start, end);
+	kvm_arch_update_mem_attr(kvm, attr, start, end);
 
 	return r;
err:
-- 
2.25.1
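
For readers who want to poke at the disallow_lpage encoding in isolation,
here is a minimal user-space sketch. It is not kernel code: the KVM_LPAGE_*
constants and the update logic mirror the patch above, but the surrounding
harness (assert, main, printf) is purely illustrative, and disallow_lpage is
declared unsigned here to keep the bit operations well-defined in portable C.
It demonstrates why reserving a bit works without touching existing callers:
the large-page checks treat any non-zero disallow_lpage as "don't map a large
page here", so the mixed bit keeps the mapping disallowed even after the
reference count in the low 31 bits drops back to zero.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Bit 31 flags a large page covering both private and shared small
 * pages; bits 0..30 remain an ordinary disallow reference count. */
#define KVM_LPAGE_PRIVATE_SHARED_MIXED	(1U << 31)
#define KVM_LPAGE_COUNT_MAX		((1U << 31) - 1)

struct kvm_lpage_info {
	unsigned int disallow_lpage;	/* 'int' in the kernel */
};

/* Count update with the same overflow/underflow guard as the
 * WARN_ON() added to update_gfn_disallow_lpage_count(). */
static void update_disallow_count(struct kvm_lpage_info *linfo, int count)
{
	long long cur = linfo->disallow_lpage & KVM_LPAGE_COUNT_MAX;

	assert(cur + count >= 0 && cur + count <= KVM_LPAGE_COUNT_MAX);
	linfo->disallow_lpage += count;
}

/* Same logic as update_mixed() in the patch. */
static void set_mixed(struct kvm_lpage_info *linfo, bool mixed)
{
	if (mixed)
		linfo->disallow_lpage |= KVM_LPAGE_PRIVATE_SHARED_MIXED;
	else
		linfo->disallow_lpage &= ~KVM_LPAGE_PRIVATE_SHARED_MIXED;
}

int main(void)
{
	struct kvm_lpage_info linfo = { 0 };

	update_disallow_count(&linfo, 1);	/* e.g. a misaligned slot */
	set_mixed(&linfo, true);		/* private/shared mixed   */
	update_disallow_count(&linfo, -1);	/* count back to zero...  */

	/* ...but the mixed bit alone still disallows the large page. */
	printf("disallow_lpage = %#x (%s)\n", linfo.disallow_lpage,
	       linfo.disallow_lpage ? "disallowed" : "allowed");
	return 0;
}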