[PATCH] kvmppc: add detailed exit timing statistic v2

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Christian Ehrhardt <ehrhardt@xxxxxxxxxxxxxxxxxx>

Since relay based statistic approaches slow us down too much in some scenarios
this patch adds a configurable exit timing statistic.
It is powerpc only and holds timing stats of exit/reenter per exit type read
via debugfs.

updates in v2:
 * moved statistic code to a separate header
 * changed code to be at least basically multi cpu/guest safe
 * reworked old noop stubs used if exit timing is not configured
 * combined kvm-stat based accounting with this (less code in our main code flow)
 * drop internal debug print_exit_timing and its callers
 * changed access from sysrq to debugfs using seq_file
 * introduced per vm & per vcpu debugfs reports
 * a lot of renaming/cleaning

The issue behind this patch is that trace based approaches generate too much
overhead in our hot path, while the existing kvm_stat interface is not able to
report per vm/vcpu (or would become unusable by the high number of files I
would need). Therefore this patch uses the already existing seq_file helpers to
extend the data presented in the %debugfs%/kvm directory.

This emerged from a debug patch I once created. I reworked and polished it to
allow it to be applied to our powerpc upstream code, but it can't deny its
heritage so this patch v2 is my rfc to see if/how I could/should do different.

Signed-off-by: Christian Ehrhardt <ehrhardt@xxxxxxxxxxxxxxxxxx>
---

[diffstat]
 arch/powerpc/kernel/asm-offsets.c      |   11 ++
 arch/powerpc/kvm/Kconfig               |    9 ++
 arch/powerpc/kvm/booke_guest.c         |   39 +++++----
 arch/powerpc/kvm/booke_interrupts.S    |   24 +++++
 arch/powerpc/kvm/powerpc.c             |   93 ++++++++++++++++++++++
 include/asm-powerpc/kvm_host.h         |   50 ++++++++++++
 include/asm-powerpc/kvm_timing_stats.h |  136 +++++++++++++++++++++++++++++++++
 7 files changed, 347 insertions(+), 15 deletions(-)

[diff]

diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -348,5 +348,16 @@
 	DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
+						arch.timing_exit.tv32.tbu));
+	DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
+						arch.timing_exit.tv32.tbl));
+	DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
+					arch.timing_last_enter.tv32.tbu));
+	DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
+					arch.timing_last_enter.tv32.tbl));
+#endif
+
 	return 0;
 }
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -37,6 +37,15 @@
 	  Provides host support for KVM on Book E PowerPC processors. Currently
 	  this works on 440 processors only.
 
+config KVM_BOOKE_EXIT_TIMING
+	bool "Trace detailed exit Timing"
+	depends on KVM_BOOKE_HOST
+	---help---
+	  Inserts code to trace timestamps for every exit/enter cycle. A per vcpu
	  report is available in debugfs kvm/VM_###/VCPU_###_exit_timing.
+	  The overhead is relatively small, however it is not recommended for
+	  production environments.
+
 config KVM_TRACE
 	bool "KVM trace support"
 	depends on KVM && MARKERS && SYSFS
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -29,6 +29,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_timing_stats.h>
 
 #include "44x_tlb.h"
 
@@ -228,6 +229,9 @@
 	enum emulation_result er;
 	int r = RESUME_HOST;
 
+	/* update before a new last_exit_type is rewritten */
+	update_timing_stats(vcpu);
+
 	local_irq_enable();
 
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -241,7 +245,7 @@
 		break;
 
 	case BOOKE_INTERRUPT_EXTERNAL:
-		vcpu->stat.ext_intr_exits++;
+		account_exit(vcpu, EXT_INTR_EXITS);
 		if (need_resched())
 			cond_resched();
 		r = RESUME_GUEST;
@@ -256,7 +260,7 @@
 		 * we do reschedule the host will fault over it. Perhaps we
 		 * should politely restore the host's entries to minimize
 		 * misses before ceding control. */
-		vcpu->stat.dec_exits++;
+		account_exit(vcpu, DEC_EXITS);
 		if (need_resched())
 			cond_resched();
 		r = RESUME_GUEST;
@@ -269,6 +273,7 @@
 			vcpu->arch.esr = vcpu->arch.fault_esr;
 			kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
 			r = RESUME_GUEST;
+			account_exit(vcpu, USR_PR_INST);
 			break;
 		}
 
@@ -277,12 +282,12 @@
 		case EMULATE_DONE:
 			/* Future optimization: only reload non-volatiles if
 			 * they were actually modified by emulation. */
-			vcpu->stat.emulated_inst_exits++;
+			account_exit(vcpu, EMULATED_INST_EXITS);
 			r = RESUME_GUEST_NV;
 			break;
 		case EMULATE_DO_DCR:
 			run->exit_reason = KVM_EXIT_DCR;
-			vcpu->stat.dcr_exits++;
+			account_exit(vcpu, DCR_EXITS);
 			r = RESUME_HOST;
 			break;
 		case EMULATE_FAIL:
@@ -302,6 +307,7 @@
 
 	case BOOKE_INTERRUPT_FP_UNAVAIL:
 		kvmppc_queue_exception(vcpu, exit_nr);
+		account_exit(vcpu, FP_UNAVAIL);
 		r = RESUME_GUEST;
 		break;
 
@@ -309,24 +315,24 @@
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		vcpu->arch.esr = vcpu->arch.fault_esr;
 		kvmppc_queue_exception(vcpu, exit_nr);
-		vcpu->stat.dsi_exits++;
+		account_exit(vcpu, DSI_EXITS);
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_INST_STORAGE:
 		vcpu->arch.esr = vcpu->arch.fault_esr;
 		kvmppc_queue_exception(vcpu, exit_nr);
-		vcpu->stat.isi_exits++;
+		account_exit(vcpu, ISI_EXITS);
 		r = RESUME_GUEST;
 		break;
 
 	case BOOKE_INTERRUPT_SYSCALL:
 		if (vcpu->arch.last_inst == KVM_HYPERCALL_BIN) {
 			kvmppc_do_hypercall(vcpu);
-			vcpu->stat.hcall_exits++;
+			account_exit(vcpu, HCALL_EXITS);
 		} else {
 			kvmppc_queue_exception(vcpu, exit_nr);
-			vcpu->stat.syscall_exits++;
+			account_exit(vcpu, SYSCALL_EXITS);
 		}
 		r = RESUME_GUEST;
 		break;
@@ -340,7 +346,7 @@
 			kvmppc_mmu_map(vcpu, eaddr,
 			 vcpu->arch.pvmem_gpaddr >> KVM_PPCPV_MAGIC_PAGE_SHIFT,
 			 0, KVM_PPCPV_MAGIC_PAGE_FLAGS);
-			vcpu->stat.dtlb_pvmem_miss_exits++;
+			account_exit(vcpu, DTLB_PVMEM_MISS_EXITS);
 			r = RESUME_GUEST;
 			break;
 		}
@@ -352,7 +358,7 @@
 			kvmppc_queue_exception(vcpu, exit_nr);
 			vcpu->arch.dear = vcpu->arch.fault_dear;
 			vcpu->arch.esr = vcpu->arch.fault_esr;
-			vcpu->stat.dtlb_real_miss_exits++;
+			account_exit(vcpu, DTLB_REAL_MISS_EXITS);
 			r = RESUME_GUEST;
 			break;
 		}
@@ -369,13 +375,13 @@
 			 * invoking the guest. */
 			kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
 			               gtlbe->word2);
-			vcpu->stat.dtlb_virt_miss_exits++;
+			account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
 			r = RESUME_GUEST;
 		} else {
 			/* Guest has mapped and accessed a page which is not
 			 * actually RAM. */
 			r = kvmppc_emulate_mmio(run, vcpu);
-			vcpu->stat.mmio_exits++;
+			account_exit(vcpu, MMIO_EXITS);
 		}
 
 		break;
@@ -393,11 +399,11 @@
 		if (!gtlbe) {
 			/* The guest didn't have a mapping for it. */
 			kvmppc_queue_exception(vcpu, exit_nr);
-			vcpu->stat.itlb_real_miss_exits++;
+			account_exit(vcpu, ITLB_REAL_MISS_EXITS);
 			break;
 		}
 
-		vcpu->stat.itlb_virt_miss_exits++;
+		account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
 		gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
 
@@ -430,6 +436,7 @@
 		mtspr(SPRN_DBSR, dbsr);
 
 		run->exit_reason = KVM_EXIT_DEBUG;
+		account_exit(vcpu, DEBUG_EXITS);
 		r = RESUME_HOST;
 		break;
 	}
@@ -450,7 +457,7 @@
 		if (signal_pending(current)) {
 			run->exit_reason = KVM_EXIT_INTR;
 			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-			vcpu->stat.signal_exits++;
+			account_exit(vcpu, SIGNAL_EXITS);
 		}
 	}
 
@@ -490,6 +497,8 @@
 	 * real timebase frequency. Accordingly, it must see the state of
 	 * CCR1[TCS]. */
 	vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+	init_timing_stats(vcpu);
 
 	return 0;
 }
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -109,6 +109,18 @@
 
 	li	r6, 1
 	slw	r6, r6, r5
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	/* save exit time */
+..exit_tbu_overflow_loop:
+	mfspr	r7, SPRN_TBRU
+	mfspr	r8, SPRN_TBRL
+	mfspr	r9, SPRN_TBRU
+	cmpw	r9, r7
+	bne	..exit_tbu_overflow_loop
+	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
+	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
 
 	/* Save the faulting instruction and all GPRs for emulation. */
 	andi.	r7, r6, NEED_INST_MASK
@@ -415,6 +427,18 @@
 	lwz	r3, VCPU_SPRG7(r4)
 	mtspr	SPRN_SPRG7, r3
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	/* save enter time */
+..enter_tbu_overflow_loop:
+	mfspr	r6, SPRN_TBRU
+	mfspr	r7, SPRN_TBRL
+	mfspr	r8, SPRN_TBRU
+	cmpw	r8, r6
+	bne	..enter_tbu_overflow_loop
+	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
+	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
 	/* Finish loading guest volatiles and jump to guest. */
 	lwz	r3, VCPU_CTR(r4)
 	mtctr	r3
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,6 +28,10 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/kvm_timing_stats.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
 
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -108,6 +112,97 @@
 	*(int *)rtn = r;
 }
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
+	"MMIO",
+	"DCR",
+	"SIGNAL",
+	"ITLBREAL",
+	"ITLBVIRT",
+	"DTLBREAL",
+	"DTLBPV",
+	"DTLBVIRT",
+	"SYSCALL",
+	"HCALL",
+	"ISI",
+	"DSI",
+	"EMULINST",
+	"DEC",
+	"EXTINT",
+	"HALT",
+	"USR_PR_INST",
+	"FP_UNAVAIL",
+	"DEBUG",
+	"TIMEINGUEST"
+};
+
+static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
+{
+	struct kvm_vcpu *vcpu = m->private;
+	int i;
+	u64 min, max;
+
+	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+		if (vcpu->arch.timing_min_duration[i] == 0xFFFFFFFF)
+			min = 0;
+		else
+			min = vcpu->arch.timing_min_duration[i];
+		if (vcpu->arch.timing_max_duration[i] == 0)
+			max = 0;
+		else
+			max = vcpu->arch.timing_max_duration[i];
+
+		seq_printf(m, "%12s: count %10d min %10lld "
+			"max %10lld sum %20lld sum_quad %20lld\n",
+			kvm_exit_names[i], vcpu->arch.timing_count_type[i],
+			vcpu->arch.timing_min_duration[i],
+			vcpu->arch.timing_max_duration[i],
+			vcpu->arch.timing_sum_duration[i],
+			vcpu->arch.timing_sum_quad_duration[i]);
+	}
+	return 0;
+}
+
+static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
+{
+	/* inode->i_private holds *vcpu from debugfs file creation */
+	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+}
+
+static struct file_operations kvmppc_exit_timing_fops = {
+	.owner   = THIS_MODULE,
+	.open    = kvmppc_exit_timing_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
+
+static atomic_t vm_count = ATOMIC_INIT(0);
+
+static void kvmppc_create_vm_debugfs(struct kvm *kvm)
+{
+	static char dbg_dname[10];
+	int vm_number;
+
+	vm_number = atomic_inc_return(&vm_count);
+	snprintf(dbg_dname, sizeof(dbg_dname), "VM_%03d", vm_number);
+	kvm->arch.debugfs_dir = debugfs_create_dir(dbg_dname, kvm_debugfs_dir);
+}
+
+static void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
+{
+	static char dbg_fname[24];
+
+	snprintf(dbg_fname, sizeof(dbg_fname), "VCPU_%03d_exit_timing", id);
+	vcpu->arch.debugfs_exit_timing = debugfs_create_file(dbg_fname, 0444,
+					vcpu->kvm->arch.debugfs_dir, vcpu,
+					&kvmppc_exit_timing_fops);
+}
+#else
+#define kvmppc_create_vm_debugfs(x) do { } while (0)
+#define kvmppc_create_vcpu_debugfs(x, y) do { } while (0)
+#endif /* CONFIG_KVM_BOOKE_EXIT_TIMING */
+
 struct kvm *kvm_arch_create_vm(void)
 {
 	struct kvm *kvm;
@@ -115,6 +210,8 @@
 	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
+
+	kvmppc_create_vm_debugfs(kvm);
 
 	return kvm;
 }
@@ -135,6 +232,10 @@
 {
 	kvmppc_free_vcpus(kvm);
 	kvm_free_physmem(kvm);
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	atomic_dec(&vm_count);
+	debugfs_remove(kvm->arch.debugfs_dir);
+#endif
 	kfree(kvm);
 }
 
@@ -201,6 +302,8 @@
 	if (err)
 		goto free_vcpu;
 
+	kvmppc_create_vcpu_debugfs(vcpu, id);
+
 	return vcpu;
 
 free_vcpu:
@@ -217,6 +320,9 @@
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	debugfs_remove(vcpu->arch.debugfs_exit_timing);
+#endif
 	kvm_arch_vcpu_free(vcpu);
 }
 
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
--- a/include/asm-powerpc/kvm_host.h
+++ b/include/asm-powerpc/kvm_host.h
@@ -73,7 +73,44 @@
 	u32 word2;
 };
 
+enum kvm_exit_types {
+	MMIO_EXITS,
+	DCR_EXITS,
+	SIGNAL_EXITS,
+	ITLB_REAL_MISS_EXITS,
+	ITLB_VIRT_MISS_EXITS,
+	DTLB_REAL_MISS_EXITS,
+	DTLB_PVMEM_MISS_EXITS,
+	DTLB_VIRT_MISS_EXITS,
+	SYSCALL_EXITS,
+	HCALL_EXITS,
+	ISI_EXITS,
+	DSI_EXITS,
+	EMULATED_INST_EXITS,
+	DEC_EXITS,
+	EXT_INTR_EXITS,
+	HALT_WAKEUP,
+	USR_PR_INST,
+	FP_UNAVAIL,
+	DEBUG_EXITS,
+	TIMEINGUEST,
+	__NUMBER_OF_KVM_EXIT_TYPES
+};
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+/* allow access to big endian 32bit upper/lower parts and 64bit var */
+typedef union {
+	u64 tv64;
+	struct {
+		u32 tbu, tbl;
+	} tv32;
+} exit_timing_t;
+#endif
+
 struct kvm_arch {
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	struct dentry *debugfs_dir;
+#endif
 };
 
 struct kvm_vcpu_arch {
@@ -139,6 +176,19 @@
 	u32 dbcr0;
 	u32 dbcr1;
 
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	exit_timing_t timing_exit;
+	exit_timing_t timing_last_enter;
+	u32 last_exit_type;
+	u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+	u64 timing_last_exit;
+	struct dentry *debugfs_exit_timing;
+#endif
+
 	u32 last_inst;
 	u32 fault_dear;
 	u32 fault_esr;
diff --git a/include/asm-powerpc/kvm_timing_stats.h b/include/asm-powerpc/kvm_timing_stats.h
new file mode 100644
--- /dev/null
+++ b/include/asm-powerpc/kvm_timing_stats.h
@@ -0,0 +1,136 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Christian Ehrhardt <ehrhardt@xxxxxxxxxxxxxxxxxx>
+ */
+
+#ifndef __POWERPC_KVM_EXITTIMING_H__
+#define __POWERPC_KVM_EXITTIMING_H__
+
+#include <linux/kvm_host.h>
+#include <asm/time.h>
+#include <asm-generic/div64.h>
+#include <linux/sysrq.h>
+
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+static inline void init_timing_stats(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	vcpu->arch.last_exit_type = 0xDEAD;
+	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+		vcpu->arch.timing_count_type[i] = 0;
+		vcpu->arch.timing_max_duration[i] = 0;
+		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
+		vcpu->arch.timing_sum_duration[i] = 0;
+		vcpu->arch.timing_sum_quad_duration[i] = 0;
+	}
+	vcpu->arch.timing_last_exit = 0;
+	vcpu->arch.timing_exit.tv64 = 0;
+	vcpu->arch.timing_last_enter.tv64 = 0;
+}
+
+static inline void add_exit_timing(struct kvm_vcpu *vcpu,
+					u64 duration, int type)
+{
+	u64 old;
+
+	do_div(duration, tb_ticks_per_usec);
+	if (unlikely(duration > 0xFFFFFFFF)) {
+		printk(KERN_ERR"%s - duration too big -> overflow"
+			" duration %lld type %d exit #%d\n",
+			__func__, duration, type,
+			vcpu->arch.timing_count_type[type]);
+	}
+
+	vcpu->arch.timing_count_type[type]++;
+
+	/* sum */
+	old = vcpu->arch.timing_sum_duration[type];
+	vcpu->arch.timing_sum_duration[type] += duration;
+	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
+		printk(KERN_ERR"%s - wrap adding sum of durations"
+			" old %lld new %lld type %d exit # of type %d\n",
+			__func__, old, vcpu->arch.timing_sum_duration[type],
+			type, vcpu->arch.timing_count_type[type]);
+	}
+
+	/* square sum */
+	old = vcpu->arch.timing_sum_quad_duration[type];
+	vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
+	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
+		printk(KERN_ERR"%s - wrap adding sum of squared durations"
+			" old %lld new %lld type %d exit # of type %d\n",
+			__func__, old,
+			vcpu->arch.timing_sum_quad_duration[type],
+			type, vcpu->arch.timing_count_type[type]);
+	}
+
+	/* set min/max */
+	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
+		vcpu->arch.timing_min_duration[type] = duration;
+	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
+		vcpu->arch.timing_max_duration[type] = duration;
+}
+
+static inline void update_timing_stats(struct kvm_vcpu *vcpu)
+{
+	u64 exit = vcpu->arch.timing_last_exit;
+	u64 enter = vcpu->arch.timing_last_enter.tv64;
+
+	/* save exit time, used next exit when the reenter time is known */
+	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
+
+	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD))
+		return; /* skip first incomplete enter/exit cycle */
+
+	/* update statistics for average and standard deviation */
+	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
+	/* enter -> timing_last_exit is time spent in guest - log this too */
+	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
+			TIMEINGUEST);
+}
+#else
+#define init_timing_stats(x) do { } while (0)
+#define update_timing_stats(x) do { } while (0)
+#endif /* CONFIG_KVM_BOOKE_EXIT_TIMING */
+
+static inline void account_exit(struct kvm_vcpu *vcpu, int type)
+{
+#ifdef CONFIG_KVM_BOOKE_EXIT_TIMING
+	vcpu->arch.last_exit_type = type;
+#endif
+	/* type is usually known at build time */
+	switch (type) {
+	case EXT_INTR_EXITS: vcpu->stat.ext_intr_exits++; break;
+	case DEC_EXITS: vcpu->stat.dec_exits++; break;
+	case EMULATED_INST_EXITS: vcpu->stat.emulated_inst_exits++; break;
+	case DCR_EXITS: vcpu->stat.dcr_exits++; break;
+	case DSI_EXITS: vcpu->stat.dsi_exits++; break;
+	case ISI_EXITS: vcpu->stat.isi_exits++; break;
+	case HCALL_EXITS: vcpu->stat.hcall_exits++; break;
+	case SYSCALL_EXITS: vcpu->stat.syscall_exits++; break;
+	case DTLB_PVMEM_MISS_EXITS: vcpu->stat.dtlb_pvmem_miss_exits++; break;
+	case DTLB_REAL_MISS_EXITS: vcpu->stat.dtlb_real_miss_exits++; break;
+	case DTLB_VIRT_MISS_EXITS: vcpu->stat.dtlb_virt_miss_exits++; break;
+	case MMIO_EXITS: vcpu->stat.mmio_exits++; break;
+	case ITLB_REAL_MISS_EXITS: vcpu->stat.itlb_real_miss_exits++; break;
+	case ITLB_VIRT_MISS_EXITS: vcpu->stat.itlb_virt_miss_exits++; break;
+	case SIGNAL_EXITS: vcpu->stat.signal_exits++; break;
+	}
+}
+
+#endif /* __POWERPC_KVM_EXITTIMING_H__ */
--
To unsubscribe from this list: send the line "unsubscribe kvm-ppc" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [KVM Development]     [KVM ARM]     [KVM ia64]     [Linux Virtualization]     [Linux USB Devel]     [Linux Video]     [Linux Audio Users]     [Linux Kernel]     [Linux SCSI]     [Big List of Linux Books]

  Powered by Linux