[PATCH 1/5] KVM: Make the maximum number of user memslots a per-VM thing

Limiting the maximum number of user memslots globally can be undesirable as
different VMs may have different needs. Generally, a relatively small
number should suffice, and a VMM may want to enforce the limit so a VM
doesn't accidentally consume too much memory. On the other hand, the number
of required memslots can depend on the number of assigned vCPUs, e.g. each
Hyper-V SynIC may require up to two additional slots per vCPU.

Prepare to make the maximum number of user memslots a per-VM setting. There
is no functional change in this patch as the limit is still hard-coded to
KVM_USER_MEM_SLOTS.
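
For illustration only (not part of the patch itself): with this change,
KVM_CHECK_EXTENSION(KVM_CAP_NR_MEMSLOTS) issued on a VM fd reports the
per-VM limit rather than the global constant, so a VMM can discover the
effective limit for the VM it created. A minimal userspace sketch,
assuming a host where VM-level extension checks are available
(KVM_CAP_CHECK_EXTENSION_VM):

	/* Illustrative sketch, not part of this patch: query the per-VM
	 * memslot limit.  With this patch the value is still
	 * KVM_USER_MEM_SLOTS for every VM.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

		/* On a VM fd the extension check returns the per-VM limit. */
		int nr_slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);

		printf("this VM supports up to %d user memslots\n", nr_slots);
		return 0;
	}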

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 arch/powerpc/kvm/book3s_hv.c |  2 +-
 arch/s390/kvm/kvm-s390.c     |  2 +-
 include/linux/kvm_host.h     |  1 +
 virt/kvm/dirty_ring.c        |  2 +-
 virt/kvm/kvm_main.c          | 11 ++++++-----
 5 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6f612d240392..bea2f34e3662 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4472,7 +4472,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
 	mutex_lock(&kvm->slots_lock);
 
 	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
+	if (log->slot >= kvm->memslots_max)
 		goto out;
 
 	slots = kvm_memslots(kvm);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index dbafd057ca6a..b8c49105f40c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -640,7 +640,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	mutex_lock(&kvm->slots_lock);
 
 	r = -EINVAL;
-	if (log->slot >= KVM_USER_MEM_SLOTS)
+	if (log->slot >= kvm->memslots_max)
 		goto out;
 
 	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f3b1013fb22c..0033ccffe617 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -513,6 +513,7 @@ struct kvm {
 	pid_t userspace_pid;
 	unsigned int max_halt_poll_ns;
 	u32 dirty_ring_size;
+	short int memslots_max;
 };
 
 #define kvm_err(fmt, ...) \
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 9d01299563ee..40d0a749a55d 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -52,7 +52,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 	as_id = slot >> 16;
 	id = (u16)slot;
 
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= kvm->memslots_max)
 		return;
 
 	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8367d88ce39b..a78e982e7107 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -755,6 +755,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	INIT_LIST_HEAD(&kvm->devices);
 
 	BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
+	kvm->memslots_max = KVM_USER_MEM_SLOTS;
 
 	if (init_srcu_struct(&kvm->srcu))
 		goto out_err_no_srcu;
@@ -1404,7 +1405,7 @@ EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 					  struct kvm_userspace_memory_region *mem)
 {
-	if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
+	if ((u16)mem->slot >= kvm->memslots_max)
 		return -EINVAL;
 
 	return kvm_set_memory_region(kvm, mem);
@@ -1435,7 +1436,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
 
 	as_id = log->slot >> 16;
 	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= kvm->memslots_max)
 		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);
@@ -1497,7 +1498,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	as_id = log->slot >> 16;
 	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= kvm->memslots_max)
 		return -EINVAL;
 
 	slots = __kvm_memslots(kvm, as_id);
@@ -1609,7 +1610,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 
 	as_id = log->slot >> 16;
 	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= kvm->memslots_max)
 		return -EINVAL;
 
 	if (log->first_page & 63)
@@ -3682,7 +3683,7 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 		return KVM_ADDRESS_SPACE_NUM;
 #endif
 	case KVM_CAP_NR_MEMSLOTS:
-		return KVM_USER_MEM_SLOTS;
+		return kvm ? kvm->memslots_max : KVM_USER_MEM_SLOTS;
 	case KVM_CAP_DIRTY_LOG_RING:
 #if KVM_DIRTY_LOG_PAGE_OFFSET > 0
 		return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
-- 
2.29.2



