[PATCH] kvm/ia64: Lazy memory allocation for kvm/ia64

Hi, Jes
  Could you help to verify this patch on your mainframe? As you said,
when a guest is configured with more than 200G of RAM, it takes about
20 minutes to initialize qemu. This patch changes the memory allocation
logic so that guest RAM is allocated lazily, on first touch, instead of
being mapped up front; the time spent in the initialization stage should
drop to less than one minute. If so, it should resolve the issue you met.
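
For anyone not familiar with the kvm/ia64 P2M table (PMT), below is a
minimal toy model of the lazy-fill scheme, illustrative only: the table
size, the fake pte values, and main() are invented for the example; the
real counterparts are kvm_get_mpt_entry(), alloc_real_maddr() and
handle_mem_alloc() in the diff that follows.

#include <stdio.h>
#include <stdint.h>

#define NPAGES 8	/* toy table size; the real PMT covers every guest pfn */

/* guest pfn -> host pte; 0 means "no host page allocated yet" */
static uint64_t pmt[NPAGES];

/*
 * Stands in for the EXIT_REASON_ALLOC_MEM round trip to the host,
 * where handle_mem_alloc() calls gfn_to_pfn() and fills the PMT entry.
 */
static uint64_t fake_alloc_real_maddr(unsigned long gpfn)
{
	return ((uint64_t)(gpfn + 1) << 12) | 1;	/* invented pte value */
}

/* Stands in for kvm_get_mpt_entry(): allocate on the first lookup only. */
static uint64_t fake_get_mpt_entry(unsigned long gpfn)
{
	if (!pmt[gpfn])
		pmt[gpfn] = fake_alloc_real_maddr(gpfn);
	return pmt[gpfn];
}

int main(void)
{
	/* the first touch allocates; the second hits the cached entry */
	printf("first:  %#llx\n", (unsigned long long)fake_get_mpt_entry(3));
	printf("second: %#llx\n", (unsigned long long)fake_get_mpt_entry(3));
	return 0;
}

The real code additionally takes kvm->mmu_lock around the PMT update and
records the page in memslot->rmap[] so it can be released later.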
Thanks
Xiantao

From 13f0ee7e9340990017bedfc4b671143456360687 Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
Date: Fri, 17 Oct 2008 13:11:21 +0800
Subject: [PATCH] kvm/ia64: Lazy memory allocation for kvm/ia64

Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
---
 arch/ia64/include/asm/kvm_host.h |   16 +++++++++
 arch/ia64/kvm/kvm-ia64.c         |   70 +++++++++++++++++++++++++++++++-------
 arch/ia64/kvm/misc.h             |    7 ++++
 arch/ia64/kvm/vtlb.c             |   29 +++++++++++++++-
 4 files changed, 108 insertions(+), 14 deletions(-)

diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index e98f6f0..c2e99dd 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -39,6 +39,7 @@
 #define EXIT_REASON_EXTERNAL_INTERRUPT	6
 #define EXIT_REASON_IPI			7
 #define EXIT_REASON_PTC_G		8
+#define EXIT_REASON_ALLOC_MEM		9
 
 /*Define vmm address space and vm data space.*/
 #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20)
@@ -308,6 +309,12 @@ struct kvm_ptc_g {
 	struct kvm_vcpu *vcpu;
 };
 
+/* Alloc real memory exit */
+struct kvm_alloc_mem {
+	unsigned long gpfn;
+	unsigned long pmt_val;
+};
+
 /*Exit control data */
 struct exit_ctl_data{
 	uint32_t exit_reason;
@@ -319,6 +326,7 @@ struct exit_ctl_data{
 		struct kvm_switch_rr6	rr_data;
 		struct kvm_ipi_data	ipi_data;
 		struct kvm_ptc_g	ptc_g_data;
+		struct kvm_alloc_mem    alloc_mem;
 	} u;
 };
 
@@ -576,6 +584,14 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
 static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_is_guest_mmio(unsigned long gpfn)
+{
+	unsigned long gpa = gpfn << PAGE_SHIFT;
+	if (gpa >= MMIO_START && gpa < LEGACY_IO_START)
+		return 1;
+	return 0;
+}
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 1343781..4f955cc 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -480,6 +482,39 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static int handle_mem_alloc(struct kvm_vcpu *vcpu,
+		struct kvm_run *kvm_run)
+{
+	unsigned long pmt_val, gpfn, pfn, gpfn_off;
+	struct kvm_memory_slot *memslot;
+	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+
+	gpfn = p->u.alloc_mem.gpfn;
+
+	spin_lock(&vcpu->kvm->mmu_lock);
+	pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn);
+	if (!pmt_val) {
+
+		pfn = gfn_to_pfn(vcpu->kvm, gpfn);
+		if (!pfn_valid(pfn))
+			goto out;
+
+		kvm_set_pmt_entry(vcpu->kvm, gpfn, pfn << PAGE_SHIFT,
+						_PAGE_AR_RWX | _PAGE_MA_WB);
+
+		memslot = gfn_to_memslot(vcpu->kvm, gpfn);
+		if (!memslot)
+			goto out;
+		gpfn_off = gpfn - memslot->base_gfn;
+		memslot->rmap[gpfn_off] = (unsigned long)pfn_to_page(pfn);
+		pmt_val = kvm_get_pmt_entry(vcpu->kvm, gpfn);
+	}
+out:
+	spin_unlock(&vcpu->kvm->mmu_lock);
+	p->u.alloc_mem.pmt_val = pmt_val;
+	return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 		struct kvm_run *kvm_run) = {
 	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
@@ -491,6 +526,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
 	[EXIT_REASON_IPI]		    = handle_ipi,
 	[EXIT_REASON_PTC_G]		    = handle_global_purge,
+	[EXIT_REASON_ALLOC_MEM]		    = handle_mem_alloc,
 
 };
 
@@ -1460,21 +1496,29 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
-	for (i = 0; i < npages; i++) {
-		pfn = gfn_to_pfn(kvm, base_gfn + i);
-		if (!kvm_is_mmio_pfn(pfn)) {
-			kvm_set_pmt_entry(kvm, base_gfn + i,
-					pfn << PAGE_SHIFT,
-				_PAGE_AR_RWX | _PAGE_MA_WB);
-			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
-		} else {
-			kvm_set_pmt_entry(kvm, base_gfn + i,
-					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
-					_PAGE_MA_UC);
-			memslot->rmap[i] = 0;
+	if (kvm_is_guest_mmio(base_gfn)) {
+		printk(KERN_DEBUG "kvm: base_gfn: 0x%lx\n", base_gfn);
+		spin_lock(&kvm->mmu_lock);
+		for (i = 0; i < npages; i++) {
+			if (memslot->rmap[i] != 0) {
+				put_page((struct page *)memslot->rmap[i]);
+				memslot->rmap[i] = 0;
+			}
+			pfn = gfn_to_pfn(kvm, base_gfn + i);
+			if (!kvm_is_mmio_pfn(pfn)) {
+				kvm_set_pmt_entry(kvm, base_gfn + i,
+						pfn << PAGE_SHIFT,
+						_PAGE_AR_RWX | _PAGE_MA_WB);
+				memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+			} else {
+				kvm_set_pmt_entry(kvm, base_gfn + i,
+						GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+						_PAGE_MA_UC);
+				memslot->rmap[i] = 0;
 			}
+		}
+		spin_unlock(&kvm->mmu_lock);
 	}
-
 	return 0;
 }
 
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
index c2ad19a..f9a656b 100644
--- a/arch/ia64/kvm/misc.h
+++ b/arch/ia64/kvm/misc.h
@@ -41,6 +41,13 @@ static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
 	pmt_base[gfn] = pte;
 }
 
+static inline uint64_t kvm_get_pmt_entry(struct kvm *kvm, gfn_t gfn)
+{
+	uint64_t *pmt_base = kvm_host_get_pmt(kvm);
+
+	return pmt_base[gfn];
+}
+
 /*Function for translating host address to guest address*/
 
 static inline void *to_guest(struct kvm *kvm, void *addr)
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a..7d2a805 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -570,10 +570,37 @@ void thash_init(struct thash_cb *hcb, u64 sz)
 	}
 }
 
+static unsigned long alloc_real_maddr(unsigned long gpfn)
+{
+	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
+	unsigned long psr;
+
+	local_irq_save(psr);
+
+	p->exit_reason = EXIT_REASON_ALLOC_MEM;
+	p->u.alloc_mem.gpfn = gpfn;
+	p->u.alloc_mem.pmt_val = 0;
+	vmm_transition(current_vcpu);
+
+	local_irq_restore(psr);
+
+	return p->u.alloc_mem.pmt_val;
+}
+
 u64 kvm_get_mpt_entry(u64 gpfn)
 {
 	u64 *base = (u64 *) KVM_P2M_BASE;
-	return *(base + gpfn);
+	u64 pmt_val = *(base + gpfn);
+
+	if (!pmt_val) {
+		pmt_val = alloc_real_maddr(gpfn);
+		if (!pmt_val) {
+			/* printk(KERN_ERR "kvm: not enough memory!\n"); */
+			panic_vm(current_vcpu);
+		}
+	}
+
+	return pmt_val;
 }
 
 u64 kvm_lookup_mpa(u64 gpfn)
-- 
1.5.1
