[PATCH 2/3] KVM: arm64: Reduce overhead of trapped timer sysreg accesses

Each read/write to a trapped timer system register currently
results in a full kvm_timer_vcpu_put()/kvm_timer_vcpu_load()
cycle, which saves and restores the state of all of the timers,
and then some.

None of this is necessary: we can limit the impact to the one
timer being accessed, and leave the others alone.
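
Something along these lines (a condensed sketch of the new
trapped-read path; the wrapper name is made up for illustration,
but the helpers are the ones used in the patch below, with the
emulated-ptimer shortcut left out):

  u64 read_trapped_timer_reg(struct kvm_vcpu *vcpu,
                             enum kvm_arch_timers tmr,
                             enum kvm_arch_timer_regs treg)
  {
          /* only the context backing this register is touched */
          struct arch_timer_context *timer = vcpu_get_timer(vcpu, tmr);
          u64 val;

          preempt_disable();
          timer_save_state(timer);        /* sync this timer's HW state */
          val = kvm_arm_timer_read(vcpu, timer, treg);
          timer_restore_state(timer);     /* and put it straight back */
          preempt_enable();

          return val;
  }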

This drastically simplifies the emulated case and limits the
damage for trapped accesses. It also brings some performance
back for NV (nested virtualisation), where such traps are
frequent.
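
For the emulated physical timer, a trapped write now amounts to
this (lifted almost verbatim from the write path below):

  /* no HW state to sync: just requeue the soft timer */
  soft_timer_cancel(&timer->hrtimer);
  kvm_arm_timer_write(vcpu, timer, treg, val);
  timer_emulate(timer);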

Whilst we're at it, fix a comment that didn't quite capture why
we always set CNTVOFF_EL2 to 0 when disabling the virtual timer.
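
The resulting pairing for the virtual timer, condensed from the
save/restore hunks below:

  /* save: quiesce the vtimer, then hide the guest's offset */
  write_sysreg_el0(0, SYS_CNTV_CTL);
  isb();
  set_cntvoff(0);

  /* restore: reinstate the offset before the rest of the state */
  set_cntvoff(timer_get_offset(ctx));
  write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
  isb();
  write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);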

Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
---
 arch/arm64/kvm/arch_timer.c | 73 ++++++++++++++++++++++++-------------
 1 file changed, 48 insertions(+), 25 deletions(-)

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 587d87aec33f..1a1d7e258aba 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -434,6 +434,11 @@ static void timer_emulate(struct arch_timer_context *ctx)
 	soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
 }
 
+static void set_cntvoff(u64 cntvoff)
+{
+	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
+}
+
 static void timer_save_state(struct arch_timer_context *ctx)
 {
 	struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
@@ -457,6 +462,22 @@ static void timer_save_state(struct arch_timer_context *ctx)
 		write_sysreg_el0(0, SYS_CNTV_CTL);
 		isb();
 
+		/*
+		 * The kernel may decide to run userspace after
+		 * calling vcpu_put, so we reset cntvoff to 0 to
+		 * ensure a consistent read between user accesses to
+		 * the virtual counter and kernel accesses to the
+		 * physical counter in the non-VHE case.
+		 *
+		 * For VHE, the virtual counter uses a fixed virtual
+		 * offset of zero, so there is no need to zero the
+		 * CNTVOFF_EL2 register, but doing so turns out to be
+		 * useful when switching between EL1/vEL2 with NV.
+		 *
+		 * Do it unconditionally, as this is either unavoidable
+		 * or dirt cheap.
+		 */
+		set_cntvoff(0);
 		break;
 	case TIMER_PTIMER:
 		timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
@@ -530,6 +551,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 
 	switch (index) {
 	case TIMER_VTIMER:
+		set_cntvoff(timer_get_offset(ctx));
 		write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
 		isb();
 		write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
@@ -550,11 +572,6 @@ static void timer_restore_state(struct arch_timer_context *ctx)
 	local_irq_restore(flags);
 }
 
-static void set_cntvoff(u64 cntvoff)
-{
-	kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
-}
-
 static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
 {
 	int r;
@@ -629,8 +646,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
 		kvm_timer_vcpu_load_nogic(vcpu);
 	}
 
-	set_cntvoff(timer_get_offset(map.direct_vtimer));
-
 	kvm_timer_unblocking(vcpu);
 
 	timer_restore_state(map.direct_vtimer);
@@ -686,15 +701,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 
 	if (kvm_vcpu_is_blocking(vcpu))
 		kvm_timer_blocking(vcpu);
-
-	/*
-	 * The kernel may decide to run userspace after calling vcpu_put, so
-	 * we reset cntvoff to 0 to ensure a consistent read between user
-	 * accesses to the virtual counter and kernel access to the physical
-	 * counter of non-VHE case. For VHE, the virtual counter uses a fixed
-	 * virtual offset of zero, so no need to zero CNTVOFF_EL2 register.
-	 */
-	set_cntvoff(0);
 }
 
 /*
@@ -924,14 +930,22 @@ u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
 			      enum kvm_arch_timers tmr,
 			      enum kvm_arch_timer_regs treg)
 {
+	struct arch_timer_context *timer;
+	struct timer_map map;
 	u64 val;
 
+	get_timer_map(vcpu, &map);
+	timer = vcpu_get_timer(vcpu, tmr);
+
+	if (timer == map.emul_ptimer)
+		return kvm_arm_timer_read(vcpu, timer, treg);
+
 	preempt_disable();
-	kvm_timer_vcpu_put(vcpu);
+	timer_save_state(timer);
 
-	val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
+	val = kvm_arm_timer_read(vcpu, timer, treg);
 
-	kvm_timer_vcpu_load(vcpu);
+	timer_restore_state(timer);
 	preempt_enable();
 
 	return val;
@@ -965,13 +979,22 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
 				enum kvm_arch_timer_regs treg,
 				u64 val)
 {
-	preempt_disable();
-	kvm_timer_vcpu_put(vcpu);
-
-	kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
+	struct arch_timer_context *timer;
+	struct timer_map map;
 
-	kvm_timer_vcpu_load(vcpu);
-	preempt_enable();
+	get_timer_map(vcpu, &map);
+	timer = vcpu_get_timer(vcpu, tmr);
+	if (timer == map.emul_ptimer) {
+		soft_timer_cancel(&timer->hrtimer);
+		kvm_arm_timer_write(vcpu, timer, treg, val);
+		timer_emulate(timer);
+	} else {
+		preempt_disable();
+		timer_save_state(timer);
+		kvm_arm_timer_write(vcpu, timer, treg, val);
+		timer_restore_state(timer);
+		preempt_enable();
+	}
 }
 
 static int kvm_timer_starting_cpu(unsigned int cpu)
-- 
2.34.1



