On Sun, Aug 26, 2012 at 7:41 AM, Marc Zyngier <marc.zyngier@xxxxxxx> wrote:
> On world switch, we save the pointer to the current vcpu on the top
> of the stack. This tends to complicate the exit path a bit, as we then
> need to dig into the stack to restore it (we've already pushed a number
> of registers on top of it by that time), not to mention being error-prone.
>
> An obvious way to simplify this is to use the HTPIDR register, which is an
> architected method to store per-CPU data in HYP mode, and this is
> exactly what this patch does.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
> ---
>  arch/arm/kvm/interrupts.S |   21 +++++++++++++--------
>  1 files changed, 13 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
> index 7fb0ab9..ac680b80 100644
> --- a/arch/arm/kvm/interrupts.S
> +++ b/arch/arm/kvm/interrupts.S
> @@ -502,11 +502,18 @@ ENDPROC(__kvm_flush_vm_context)
>  	mcr	p15, 4, r2, c1, c1, 0
>  .endm
>
> +.macro load_vcpu reg
> +	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
> +.endm
> +
>  @ Arguments:
>  @  r0: pointer to vcpu struct
>  ENTRY(__kvm_vcpu_run)
>  	hvc	#0			@ switch to hyp-mode
>
> +	@ Save the vcpu pointer
> +	mcr	p15, 4, r0, c13, c0, 2	@ HTPIDR
> +
>  	@ Now we're in Hyp-mode and lr_usr, spsr_hyp are on the stack
>  	mrs	r2, sp_usr
>  	push	{r2}			@ Push r13_usr
> @@ -535,8 +542,6 @@ ENTRY(__kvm_vcpu_run)
>  	VFPFMXR FPEXC, r2		@ VMSR
>  #endif
>
> -	push	{r0}			@ Push the VCPU pointer
> -
>  	@ Configure Hyp-role
>  	configure_hyp_role 1, r0
>
> @@ -707,7 +712,7 @@ after_vfp_restore:
>  	@ the vcpu pointer and stuff off the stack and keep our fingers crossed
>  	beq	99f
>  	mov	r0, #\exception_code
> -	pop	{r1}			@ Load VCPU pointer
> +	load_vcpu r1			@ Load VCPU pointer
>  	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
>  	mrc	p15, 4, r2, c5, c2, 0	@ HSR
>  	mrc	p15, 4, r3, c6, c0, 0	@ HDFAR
> @@ -831,13 +836,12 @@ host_switch_to_hyp:
>  	mov	pc, lr
>
>  guest_trap:
> -	ldr	r1, [sp, #12]		@ Load VCPU pointer
> +	load_vcpu r1			@ Load VCPU pointer
>  	str	r0, [r1, #VCPU_HSR]
>  	add	r1, r1, #VCPU_USR_REG(3)
>  	stmia	r1, {r3-r12}
>  	sub	r1, r1, #(VCPU_USR_REG(3) - VCPU_USR_REG(0))
>  	pop	{r3, r4, r5}
> -	add	sp, sp, #4		@ We loaded the VCPU pointer above
>  	stmia	r1, {r3, r4, r5}
>  	sub	r1, r1, #VCPU_USR_REG(0)
>
> @@ -891,7 +895,7 @@ guest_trap:
>  	@ inject an undefined exception to the guest.
>  #ifdef CONFIG_VFPv3
>  switch_to_guest_vfp:
> -	ldr	r0, [sp, #12]		@ Load VCPU pointer
> +	load_vcpu r0			@ Load VCPU pointer
>  	push	{r3-r7}
>
>  	@ NEON/VFP used. Turn on VFP access.
> @@ -912,10 +916,11 @@ switch_to_guest_vfp:
>  	.align
>  hyp_irq:
>  	push	{r0}
> -	ldr	r0, [sp, #4]		@ Load VCPU pointer
> +	load_vcpu r0			@ Load VCPU pointer
>  	add	r0, r0, #(VCPU_USR_REG(1))
>  	stmia	r0, {r1-r12}
> -	pop	{r0, r1}		@ r1 == vcpu pointer
> +	pop	{r0}
> +	load_vcpu r1			@ Load VCPU pointer again
>  	str	r0, [r1, #VCPU_USR_REG(0)]
>
>  	mov	r0, #ARM_EXCEPTION_IRQ
> --
> 1.7.8.6
>

Thanks, applied
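
For readers less familiar with the cp15 encoding involved, here is a minimal sketch of the per-CPU scratch-register pattern the patch relies on. It is not part of the patch: the store_vcpu macro name is made up for illustration (only load_vcpu exists in interrupts.S), but the mcr/mrc encoding is the one the patch itself uses for HTPIDR.

	@ HTPIDR (cp15, op1=4, CRn=c13, CRm=c0, op2=2) is HYP-mode-private
	@ scratch storage, so a pointer written there survives any amount of
	@ pushing and popping on the HYP stack.

	.macro store_vcpu reg			@ hypothetical helper, illustration only
	mcr	p15, 4, \reg, c13, c0, 2	@ write HTPIDR
	.endm

	.macro load_vcpu reg
	mrc	p15, 4, \reg, c13, c0, 2	@ read HTPIDR
	.endm

	@ Entry path: r0 = vcpu pointer handed in by the host
	store_vcpu r0

	@ ... arbitrary pushes/pops later, on any exit path ...
	load_vcpu r1				@ r1 = vcpu pointer, no stack offsets needed

The benefit is that exit paths no longer need to know how many registers sit between sp and the saved pointer, which is exactly what the removed [sp, #4] and [sp, #12] loads had to track by hand.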