[patch 10/18] x86: kvm guest: pvclock vsyscall support

Hook into the generic pvclock vsyscall code, with the aim of allowing
userspace to have visibility into pvclock data.

Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
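
Not part of this patch, but to illustrate what exporting this data makes
possible: a userspace (or vDSO) consumer that has the per-vCPU pvclock
area mapped could sample time roughly as sketched below.  The struct
layout mirrors the pvclock ABI; rdtsc(), the mapping itself, and the
barrier choice are assumptions of the sketch, and the 64x32 multiply is
simplified relative to the kernel's overflow-safe variant.

#include <stdint.h>

/* Mirrors the ABI layout of struct pvclock_vcpu_time_info. */
struct pvclock_vcpu_time_info {
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;
	uint64_t system_time;
	uint32_t tsc_to_system_mul;
	int8_t   tsc_shift;
	uint8_t  flags;
	uint8_t  pad[2];
};

static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/*
 * Sample nanoseconds from one pvti entry using the version protocol:
 * an odd version means the hypervisor is updating the entry, and a
 * changed version means the reads raced with an update, so retry.
 * Assumes PVCLOCK_TSC_STABLE_BIT is set, so any vCPU's entry may be used.
 */
static uint64_t pvclock_sample_ns(const volatile struct pvclock_vcpu_time_info *pvti)
{
	uint32_t version;
	uint64_t delta, ns;

	do {
		version = pvti->version;
		asm volatile("" ::: "memory");	/* version before payload */

		delta = rdtsc() - pvti->tsc_timestamp;
		if (pvti->tsc_shift >= 0)
			delta <<= pvti->tsc_shift;
		else
			delta >>= -pvti->tsc_shift;
		/* 64x32 fixed-point scale; the kernel uses a wider
		 * multiply here to avoid overflow on large deltas. */
		ns = pvti->system_time +
		     ((delta * pvti->tsc_to_system_mul) >> 32);

		asm volatile("" ::: "memory");	/* payload before re-check */
	} while ((version & 1) || version != pvti->version);

	return ns;
}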

Index: vsyscall/arch/x86/kernel/kvmclock.c
===================================================================
--- vsyscall.orig/arch/x86/kernel/kvmclock.c
+++ vsyscall/arch/x86/kernel/kvmclock.c
@@ -40,11 +40,7 @@ static int parse_no_kvmclock(char *arg)
 early_param("no-kvmclock", parse_no_kvmclock);
 
 /* The hypervisor will put information about time periodically here */
-struct pvclock_aligned_vcpu_time_info {
-	struct pvclock_vcpu_time_info clock;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
-
-static struct pvclock_aligned_vcpu_time_info *hv_clock;
+static struct pvclock_vsyscall_time_info *hv_clock;
 static struct pvclock_wall_clock wall_clock;
 
 /*
@@ -67,7 +63,7 @@ static unsigned long kvm_get_wallclock(v
 
 	native_write_msr(msr_kvm_wall_clock, low, high);
 
-	vcpu_time = &hv_clock[cpu].clock;
+	vcpu_time = &hv_clock[cpu].pvti;
 	pvclock_read_wallclock(&wall_clock, vcpu_time, &ts);
 
 	preempt_enable();
@@ -88,7 +84,7 @@ static cycle_t kvm_clock_read(void)
 
 	preempt_disable_notrace();
 	cpu = smp_processor_id();
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	ret = pvclock_clocksource_read(src);
 	preempt_enable_notrace();
 	return ret;
@@ -116,7 +112,7 @@ static unsigned long kvm_get_tsc_khz(voi
 
 	preempt_disable();
 	cpu = smp_processor_id();
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	tsc_khz = pvclock_tsc_khz(src);
 	preempt_enable();
 	return tsc_khz;
@@ -143,7 +139,7 @@ bool kvm_check_and_clear_guest_paused(vo
 	if (!hv_clock)
 		return ret;
 
-	src = &hv_clock[cpu].clock;
+	src = &hv_clock[cpu].pvti;
 	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
 		src->flags &= ~PVCLOCK_GUEST_STOPPED;
 		ret = true;
@@ -164,7 +160,7 @@ int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
 	int low, high, ret;
-	struct pvclock_vcpu_time_info *src = &hv_clock[cpu].clock;
+	struct pvclock_vcpu_time_info *src = &hv_clock[cpu].pvti;
 
 	low = (int)__pa(src) | 1;
 	high = ((u64)__pa(src) >> 32);
@@ -226,7 +222,7 @@ void __init kvmclock_init(void)
 	if (!kvm_para_available())
 		return;
 
-	mem = memblock_alloc(sizeof(struct pvclock_aligned_vcpu_time_info) * NR_CPUS,
+	mem = memblock_alloc(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS,
 			     PAGE_SIZE);
 	if (!mem)
 		return;
@@ -265,3 +261,36 @@ void __init kvmclock_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 }
+
+int kvm_setup_vsyscall_timeinfo(void)
+{
+	int cpu;
+	int ret;
+	u8 flags;
+	struct pvclock_vcpu_time_info *vcpu_time;
+	unsigned int size;
+
+	size = sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+
+	vcpu_time = &hv_clock[cpu].pvti;
+	flags = pvclock_read_flags(vcpu_time);
+
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
+		preempt_enable();
+		return 1;
+	}
+
+	if ((ret = pvclock_init_vsyscall(hv_clock, size))) {
+		preempt_enable();
+		return ret;
+	}
+
+	preempt_enable();
+
+	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	return 0;
+}
+
Index: vsyscall/arch/x86/kernel/kvm.c
===================================================================
--- vsyscall.orig/arch/x86/kernel/kvm.c
+++ vsyscall/arch/x86/kernel/kvm.c
@@ -42,6 +42,7 @@
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
+#include <asm/kvm_guest.h>
 
 static int kvmapf = 1;
 
@@ -62,6 +63,15 @@ static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
+static int kvmclock_vsyscall = 1;
+static int parse_no_kvmclock_vsyscall(char *arg)
+{
+	kvmclock_vsyscall = 0;
+	return 0;
+}
+
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
@@ -468,6 +478,9 @@ void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
+	if (kvmclock_vsyscall)
+		kvm_setup_vsyscall_timeinfo();
+
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	register_cpu_notifier(&kvm_cpu_notifier);
Index: vsyscall/arch/x86/include/asm/kvm_guest.h
===================================================================
--- /dev/null
+++ vsyscall/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */

