[patch] allow dynamic sizing of the kvm vmm area on ia64

Hi,

This patch allows dynamically sizing the vmm area on ia64. I have left
the default at the same size we used to get from the constants, but it
seems to work fine when going higher. For now I put in an artificial
limit of 64 vcpus, since scaling-wise things don't work well beyond
that yet... we'll get there :-)
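
For the curious, the arithmetic behind the limit looks roughly like the
sketch below. The region sizes here are placeholders, not the real ia64
constants, so only the shape of the calculation carries over:

/* Rough sketch (not part of the patch): how the data-area shift bounds
 * the number of vcpu_data slots.  All sizes below are made-up
 * placeholders, not the actual ia64 values. */
#include <stdio.h>

int main(void)
{
	unsigned long p2m_size       = 1UL << 24;  /* placeholder */
	unsigned long vm_struct_size = 1UL << 18;  /* placeholder */
	unsigned long dirty_log_size = 1UL << 18;  /* placeholder */
	unsigned long vcpu_data_size = 1UL << 18;  /* placeholder sizeof(struct kvm_vcpu_data) */

	for (int shift = 26; shift <= 28; shift++) {
		unsigned long area  = 1UL << shift;
		unsigned long slots = (area - p2m_size - vm_struct_size -
				       dirty_log_size) / vcpu_data_size;
		printf("shift %d -> %lu MiB area, room for %lu vcpus\n",
		       shift, area >> 20, slots);
	}
	return 0;
}

Bumping the shift by one doubles the data area, and all of the extra
space goes to vcpu_data slots.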

Cheers,
Jes



Make KVM_VM_DATA_SHIFT a variable, making it possible to allocate a
larger vm data area and thereby more space for the vcpu_data array.
This allows increasing the number of vcpus a guest can support.
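
The underlying pattern is the usual trailing-array trick: drop the
fixed-size array from the struct and let the allocation size decide how
many entries fit. A minimal stand-alone illustration (names and sizes
are invented, nothing here is kernel code):

/* Illustration only: a zero-length/flexible trailing array whose usable
 * slot count is derived from the allocation size at run time rather
 * than baked into the type. */
#include <stdio.h>
#include <stdlib.h>

struct demo_slot { char payload[64]; };

struct demo_area {
	char header[128];
	struct demo_slot slots[];	/* was: slots[FIXED_MAX] */
};

int main(void)
{
	int shift = 16;			/* analogous to kvm_vm_data_shift */
	size_t size = 1UL << shift;
	struct demo_area *area = calloc(1, size);
	if (!area)
		return 1;

	/* Capacity follows from the allocation, not from the type. */
	size_t max_slots = (size - sizeof(*area)) / sizeof(struct demo_slot);
	printf("shift %d -> %zu slots\n", shift, max_slots);

	free(area);
	return 0;
}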

Signed-off-by: Jes Sorensen <jes@xxxxxxx>

---
 arch/ia64/include/asm/kvm_host.h |   14 +++++++++-----
 arch/ia64/kvm/kvm-ia64.c         |   32 ++++++++++++++++----------------
 2 files changed, 25 insertions(+), 21 deletions(-)

Index: linux-2.6.git/arch/ia64/include/asm/kvm_host.h
===================================================================
--- linux-2.6.git.orig/arch/ia64/include/asm/kvm_host.h
+++ linux-2.6.git/arch/ia64/include/asm/kvm_host.h
@@ -101,8 +101,7 @@
 #define KVM_VM_DATA_OFFSET_SHIFT	30
 #define KVM_VM_DATA_OFFSET	(__IA64_UL_CONST(1) << KVM_VM_DATA_OFFSET_SHIFT)
 
-#define KVM_VM_DATA_SHIFT	26
-#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_SIZE	(__IA64_UL_CONST(1) << kvm_vm_data_shift)
 #define KVM_VM_DATA_BASE	(KVM_VMM_BASE + KVM_VM_DATA_OFFSET)
 
 #define KVM_P2M_BASE		KVM_VM_DATA_BASE
@@ -132,9 +131,14 @@
 
 #ifndef __ASSEMBLY__
 
+extern int kvm_vm_data_shift;
+
 /*Define the max vcpus and memory for Guests.*/
-#define KVM_MAX_VCPUS	(KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\
-			KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_POSSIBLE_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - \
+				KVM_VM_STRUCT_SIZE - KVM_MEM_DIRTY_LOG_SIZE)/ \
+				sizeof(struct kvm_vcpu_data)
+#define KVM_MAX_VCPUS		64
+
 #define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
 
 #define VMM_LOG_LEN 256
@@ -160,7 +164,7 @@
 	char kvm_p2m[KVM_P2M_SIZE];
 	char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
 	char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
-	struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
+	struct kvm_vcpu_data vcpu_data[0];
 };
 
 #define VCPU_BASE(n)	(KVM_VM_DATA_BASE + \
Index: linux-2.6.git/arch/ia64/kvm/kvm-ia64.c
===================================================================
--- linux-2.6.git.orig/arch/ia64/kvm/kvm-ia64.c
+++ linux-2.6.git/arch/ia64/kvm/kvm-ia64.c
@@ -54,6 +54,7 @@
 static unsigned long kvm_vm_buffer;
 static unsigned long kvm_vm_buffer_size;
 unsigned long kvm_vmm_gp;
+int kvm_vm_data_shift = 26;	/* currently 26 limits us to 156 vcpus */
 
 static long vp_env_info;
 
@@ -583,8 +584,7 @@
 	vcpu->arch.vmm_tr_slot = r;
 	/*Insert a pairt of tr to map data of vm*/
 	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
-	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
-					pte, KVM_VM_DATA_SHIFT);
+	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE, pte, kvm_vm_data_shift);
 	if (r < 0)
 		goto out;
 	vcpu->arch.vm_tr_slot = r;
@@ -749,14 +749,14 @@
 
 	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
 
-	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
+	vm_base = __get_free_pages(GFP_KERNEL, kvm_vm_data_shift - PAGE_SHIFT);
 
 	if (!vm_base)
 		return ERR_PTR(-ENOMEM);
 
-	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+	memset((void *)vm_base, 0, (1 << kvm_vm_data_shift));
 	kvm = (struct kvm *)(vm_base +
-			offsetof(struct kvm_vm_data, kvm_vm_struct));
+			     offsetof(struct kvm_vm_data, kvm_vm_struct));
 	kvm->arch.vm_base = vm_base;
 	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
@@ -1039,7 +1039,8 @@
 {
 	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
 		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
-				get_order(KVM_VMM_SIZE));
+						KVM_VMM_SHIFT - PAGE_SHIFT);
+
 		if (!kvm_vmm_base)
 			return -ENOMEM;
 
@@ -1286,20 +1287,19 @@
 	return r;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-		unsigned int id)
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long vm_base = kvm->arch.vm_base;
-	int r;
-	int cpu;
+	int r, cpu, max_cpu;
 
 	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
 
 	r = -EINVAL;
-	if (id >= KVM_MAX_VCPUS) {
-		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
-				KVM_MAX_VCPUS);
+
+	max_cpu = min_t(long, KVM_MAX_VCPUS, KVM_MAX_POSSIBLE_VCPUS);
+	if (id >= max_cpu) {
+		printk(KERN_ERR "kvm: Can't configure vcpus > %i", max_cpu);
 		goto fail;
 	}
 
@@ -1353,12 +1353,12 @@
 static void free_kvm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
+	unsigned long vm_size = 1UL << kvm_vm_data_shift;
 
 	if (vm_base) {
-		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
+		memset((void *)vm_base, 0, vm_size);
+		free_pages(vm_base, kvm_vm_data_shift - PAGE_SHIFT);
 	}
-
 }
 
 static void kvm_release_vm_pages(struct kvm *kvm)
