RE: 16 vcpus, 200GB of memory boots!!!

Jes Sorensen wrote:
> Zhang, Xiantao wrote:
>> Hi, Jes
>> 	I found the reason why qemu only supports 16 vcpus.  In
>> libkvm/kvm-common.h, MAX_CPUS is defined as 16 on the ia64 side.  We
>> should increase this value to 64 or a bigger value if you like.  Please
>> have a try on your mainframe.  Thanks!
>> Xiantao
> 
> Xiantao,
> 
> You are too fast :-)
> 
> With this change I can boot 60 vcpus on my system - that's the limit I
> get from KVM_MAX_VCPUS with your patch from yesterday.
> 
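
[For reference: the fix being discussed here is the compile-time vcpu cap
in libkvm/kvm-common.h. Going by Xiantao's description it amounts to a
one-line change, roughly the following (the exact surrounding context is
not quoted in the thread):

#define MAX_CPUS	64	/* was 16 on the ia64 side */

with the runtime limit still capped by KVM_MAX_VCPUS on the kernel side.]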

Great news!! To fix the slowness at the QEMU initialization stage, I have
written a patch that allocates memory on demand. Could you help to try
it? Thanks!
Xiantao
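
For anyone who wants the shape of the change before wading into the diff:
the PMT (the table mapping guest page frames to host frames) now starts
out empty, a lookup that misses exits to the host, and the host fills the
entry under kvm->mmu_lock with a re-check, so two vcpus faulting on the
same gpfn fill it only once. Below is a minimal, self-contained userspace
sketch of that lazy double-checked-fill pattern, with a pthread mutex
standing in for mmu_lock; every name in it is made up for illustration,
none of it is the kernel code:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES 1024

static uint64_t pmt[NPAGES];	/* 0 == "no host frame assigned yet" */
static pthread_mutex_t pmt_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for gfn_to_pfn(): invent a backing frame number. */
static uint64_t fake_backing_pfn(uint64_t gpfn)
{
	return 0x100000ULL + gpfn;	/* any nonzero value will do */
}

/* The lazy, double-checked fill that handle_mem_alloc() performs. */
static uint64_t pmt_lookup(uint64_t gpfn)
{
	uint64_t val = pmt[gpfn];

	if (val)
		return val;	/* fast path: already backed */

	pthread_mutex_lock(&pmt_lock);
	val = pmt[gpfn];	/* re-check: another thread may have won */
	if (!val) {
		val = fake_backing_pfn(gpfn);
		pmt[gpfn] = val;
	}
	pthread_mutex_unlock(&pmt_lock);

	return val;
}

int main(void)
{
	printf("gpfn 5 -> %#llx (filled on first touch)\n",
	       (unsigned long long)pmt_lookup(5));
	printf("gpfn 5 -> %#llx (cached)\n",
	       (unsigned long long)pmt_lookup(5));
	return 0;
}

The real patch differs in one important way: the vcpu that misses cannot
fill the entry itself from VMM context, so it transitions back to the
host and lets handle_mem_alloc() do the locked fill on its behalf.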

diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index e98f6f0..9f89806 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -39,6 +39,7 @@
 #define EXIT_REASON_EXTERNAL_INTERRUPT	6
 #define EXIT_REASON_IPI			7
 #define EXIT_REASON_PTC_G		8
+#define EXIT_REASON_ALLOC_MEM		9
 
 /*Define vmm address space and vm data space.*/
 #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
@@ -308,6 +309,12 @@ struct kvm_ptc_g {
 	struct kvm_vcpu *vcpu;
 };
 
+/* Alloc real memory exit */
+struct kvm_alloc_mem {
+	unsigned long gpfn;
+	unsigned long pmt_val;
+};
+
 /*Exit control data */
 struct exit_ctl_data{
 	uint32_t exit_reason;
@@ -319,6 +326,7 @@ struct exit_ctl_data{
 		struct kvm_switch_rr6	rr_data;
 		struct kvm_ipi_data	ipi_data;
 		struct kvm_ptc_g	ptc_g_data;
+		struct kvm_alloc_mem    alloc_mem;
 	} u;
 };
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 1343781..b02bc03 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -212,6 +212,8 @@ static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
 	kvm_run->hw.hardware_exit_reason = 1;
+	printk(KERN_ERR "KVM: VM error occurred!\n");
+
 	return 0;
 }
 
@@ -480,6 +482,40 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static int handle_mem_alloc(struct kvm_vcpu *vcpu,
+		struct kvm_run *kvm_run)
+{
+	unsigned long pmt_val, gpfn, pfn, gpfn_off;
+	struct kvm_memory_slot *memslot;
+	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+
+	gpfn = p->u.alloc_mem.gpfn;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+	pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn);
+	if (!pmt_val) {
+
+		pfn = gfn_to_pfn(vcpu->kvm, gpfn);
+		if (!pfn_valid(pfn))
+			goto out;
+
+		kvm_set_pmt_entry(vcpu->kvm, gpfn, pfn << PAGE_SHIFT,
+						_PAGE_AR_RWX | _PAGE_MA_WB);
+
+		memslot = gfn_to_memslot(vcpu->kvm, gpfn);
+		if (!memslot)
+			goto out;
+		gpfn_off = gpfn - memslot->base_gfn;
+		memslot->rmap[gpfn_off] = (unsigned long)pfn_to_page(pfn);
+		pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn);
+	}
+out:
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	p->u.alloc_mem.pmt_val = pmt_val;
+
+	return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 		struct kvm_run *kvm_run) = {
 	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
@@ -491,6 +527,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
 	[EXIT_REASON_IPI]		    = handle_ipi,
 	[EXIT_REASON_PTC_G]		    = handle_global_purge,
+	[EXIT_REASON_ALLOC_MEM]		    = handle_mem_alloc,
 
 };
 
@@ -1454,26 +1491,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		struct kvm_memory_slot old,
 		int user_alloc)
 {
+
 	unsigned long i;
 	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
+	spin_lock(&kvm->mmu_lock);
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
-		if (!kvm_is_mmio_pfn(pfn)) {
-			kvm_set_pmt_entry(kvm, base_gfn + i,
-					pfn << PAGE_SHIFT,
-				_PAGE_AR_RWX | _PAGE_MA_WB);
-			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
-		} else {
+		if (kvm_is_mmio_pfn(pfn)) {
 			kvm_set_pmt_entry(kvm, base_gfn + i,
 					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
 					_PAGE_MA_UC);
-			memslot->rmap[i] = 0;
-			}
+		}
+		memslot->rmap[i] = 0;
 	}
+	spin_unlock(&kvm->mmu_lock);
 
 	return 0;
 }
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
index c2ad19a..f9a656b 100644
--- a/arch/ia64/kvm/misc.h
+++ b/arch/ia64/kvm/misc.h
@@ -41,6 +41,13 @@ static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
 	pmt_base[gfn] = pte;
 }
 
+static inline uint64_t kvm_get_pmt_entry(struct kvm *kvm, gfn_t gfn)
+{
+	uint64_t *pmt_base = kvm_host_get_pmt(kvm);
+
+	return pmt_base[gfn];
+}
+
 /*Function for translating host address to guest address*/
 
 static inline void *to_guest(struct kvm *kvm, void *addr)
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a..7d2a805 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -570,10 +570,37 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
+static unsigned long alloc_real_maddr(unsigned long gpfn)
+{
+	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
+	unsigned long psr;
+
+	local_irq_save(psr);
+
+	p->exit_reason = EXIT_REASON_ALLOC_MEM;
+	p->u.alloc_mem.gpfn = gpfn;
+	p->u.alloc_mem.pmt_val = 0;
+	vmm_transition(current_vcpu);
+
+	local_irq_restore(psr);
+
+	return p->u.alloc_mem.pmt_val;
+}
+
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
-	return *(base + gpfn);
+	u64 pmt_val = *(base + gpfn);
+
+	if (!pmt_val) {
+		pmt_val = alloc_real_maddr(gpfn);
+		if (!pmt_val) {
+			/* printk(KERN_ERR "kvm: not enough memory!\n"); */
+			panic_vm(current_vcpu);
+		}
+
+	}
+	return pmt_val;
 }
 
 u64 kvm_lookup_mpa(u64 gpfn)
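
To spell out the round trip the patch implements: on the VMM side,
kvm_get_mpt_entry() now treats a zero PMT entry as "not backed yet" and
calls alloc_real_maddr(), which records the gpfn in the vcpu's
exit_ctl_data, sets EXIT_REASON_ALLOC_MEM, and bounces back to the host
via vmm_transition(). On the host side, handle_mem_alloc() takes
kvm->mmu_lock, re-checks the PMT entry (two vcpus can fault on the same
gpfn at once), backs the page with gfn_to_pfn(), installs the mapping and
the rmap entry, and hands the resulting pmt_val back to the VMM.
kvm_arch_set_memory_region() correspondingly no longer populates the PMT
for RAM pages up front; only MMIO ranges are entered eagerly.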


> I am going to look into making this go further! :-)


> Regards,
> Jes

Attachment: allocate_on_demand.patch
Description: allocate_on_demand.patch

