[PATCH v2 3/6] ARM: KVM: introduce {save,restore}_guest_regs

The current save/restore code treats the register file as a stack,
which makes the code quite hard to read, and even harder to adapt
to a different register layout.

Furthermore, the restore code is spread across several code paths
with a loose calling convention.

Instead, move the save/restore code to a set of macros with a
well-defined calling convention, handling all the registers in a
single location. The net result is cleaner code that is largely
independent of the layout of the backing structure.
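
To illustrate the resulting convention, here is a sketch distilled from
the diff below (for review purposes only, not additional code; load_vcpu,
__kvm_vcpu_return and the ARM_EXCEPTION_* codes are the existing helpers
and constants):

	@ Entering the guest: the only precondition is r0 = vcpu pointer
	restore_guest_regs		@ banked/usr regs, ELR_hyp, SPSR
	clrex
	eret

	@ Leaving the guest (e.g. on an IRQ): stash the guest r0-r2 on the
	@ stack, then branch to the common return path with
	@ r0 = exception code, r1 = vcpu pointer
	push	{r0, r1, r2}
	mov	r0, #ARM_EXCEPTION_IRQ
	load_vcpu	r1
	b	__kvm_vcpu_return	@ starts with save_guest_regs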

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/kvm/interrupts.S      | 107 ++++++++++++------------------------
 arch/arm/kvm/interrupts_head.S | 119 +++++++++++++++++++++++++++++------------
 2 files changed, 117 insertions(+), 109 deletions(-)

diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 8d11bc0..56ec361 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -124,55 +124,30 @@ ENTRY(__kvm_vcpu_run)
 	ldr	r1, [r0, #CP15_OFFSET(c0_MPIDR)]
 	mcr	p15, 4, r1, c0, c0, 5
 
-	@ Load guest registers
-	add	r0, r0, #(VCPU_USR_SP)
-	load_mode_state r0, usr
-	load_mode_state r0, svc
-	load_mode_state r0, abt
-	load_mode_state r0, und
-	load_mode_state r0, irq
-	load_mode_state r0, fiq
-
-	@ Load return state (r0 now points to vcpu->arch.regs.pc)
-	ldmia	r0, {r2, r3}
-	msr	ELR_hyp, r2
-	msr	SPSR_cxsf, r3
-
 	@ Set up guest memory translation
-	sub	r1, r0, #(VCPU_PC - VCPU_KVM)	@ r1 points to kvm struct
-	ldr	r1, [r1]
-	add	r1, r1, #KVM_VTTBR
-	ldrd	r2, r3, [r1]
+	ldr	r1, [r0, #VCPU_KVM]
+	ldrd	r2, r3, [r1, #KVM_VTTBR]
 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
 
-	@ Load remaining registers and do the switch
-	sub	r0, r0, #(VCPU_PC - VCPU_USR_REGS)
-	ldmia	r0, {r0-r12}
+	@ At this point, r0 must contain the pointer to the VCPU
+	restore_guest_regs
 	clrex				@ Clear exclusive monitor
 	eret
 
 __kvm_vcpu_return:
+	/*
+	 * return convention:
+	 * vcpu r0, r1, r2 saved on the stack
+	 * r0: exception code
+	 * r1: vcpu pointer
+	 */
+	save_guest_regs
+
 	@ Set VMID == 0
 	mov	r2, #0
 	mov	r3, #0
 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
 
-	@ Store return state
-	mrs	r2, ELR_hyp
-	mrs	r3, spsr
-	str	r2, [r1, #VCPU_PC]
-	str	r3, [r1, #VCPU_CPSR]
-
-	@ Store guest registers
-	add	r1, r1, #(VCPU_FIQ_SPSR + 4)
-	store_mode_state r1, fiq
-	store_mode_state r1, irq
-	store_mode_state r1, und
-	store_mode_state r1, abt
-	store_mode_state r1, svc
-	store_mode_state r1, usr
-	sub	r1, r1, #(VCPU_USR_REG(13))
-
 	@ Don't trap coprocessor accesses for host kernel
 	set_hstr 0
 	set_hdcr 0
@@ -296,31 +271,26 @@ ENTRY(kvm_call_hyp)
 
 /* Handle undef, svc, pabt, or dabt by crashing with a user notice */
 .macro bad_exception exception_code, panic_str
-	mrrc	p15, 6, r2, r3, c2	@ Read VTTBR
-	lsr	r3, r3, #16
-	ands	r3, r3, #0xff
-
-	/*
-	 * COND:neq means we're probably in the guest and we can try fetching
-	 * the vcpu pointer and stuff off the stack and keep our fingers crossed
-	 */
+	push	{r0-r2}
+	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
+	lsr	r1, r1, #16
+	ands	r1, r1, #0xff
 	beq	99f
-	mov	r0, #\exception_code
+
 	load_vcpu	r1		@ Load VCPU pointer
 	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
 	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r3, c6, c0, 0	@ HDFAR
+	mrc	p15, 4, r0, c6, c0, 0	@ HDFAR
 	str	r2, [r1, #VCPU_HSR]
-	str	r3, [r1, #VCPU_HDFAR]
+	str	r0, [r1, #VCPU_HDFAR]
 	.endif
 	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
 	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r3, c6, c0, 2	@ HIFAR
+	mrc	p15, 4, r0, c6, c0, 2	@ HIFAR
 	str	r2, [r1, #VCPU_HSR]
-	str	r3, [r1, #VCPU_HIFAR]
+	str	r0, [r1, #VCPU_HIFAR]
 	.endif
-	mrs	r2, ELR_hyp
-	str	r2, [r1, #VCPU_HYP_PC]
+	mov	r0, #\exception_code
 	b	__kvm_vcpu_return
 
 	@ We were in the host already. Let's craft a panic-ing return to SVC.
@@ -423,25 +393,20 @@ THUMB(	orr	lr, #1)
 guest_trap:
 	load_vcpu	r1		@ Load VCPU pointer
 	str	r0, [r1, #VCPU_HSR]
-	add	r1, r1, #VCPU_USR_REG(3)
-	stmia	r1, {r3-r12}
-	sub	r1, r1, #(VCPU_USR_REG(3) - VCPU_USR_REG(0))
-	pop	{r3, r4, r5}
-	stmia	r1, {r3, r4, r5}
-	sub	r1, r1, #VCPU_USR_REG(0)
 
 	@ Check if we need the fault information
-	lsr	r2, r0, #HSR_EC_SHIFT
-	cmp	r2, #HSR_EC_IABT
+	lsr	r0, r0, #HSR_EC_SHIFT
+	cmp	r0, #HSR_EC_IABT
+	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
+	streq	r2, [r1, #VCPU_HIFAR]
 	beq	2f
-	cmpne	r2, #HSR_EC_DABT
+	cmp	r0, #HSR_EC_DABT
 	bne	1f
+	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR
+	str	r2, [r1, #VCPU_HDFAR]
 
-2:	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR
-	mrc	p15, 4, r3, c6, c0, 2	@ HIFAR
-	mrc	p15, 4, r4, c6, c0, 4	@ HPFAR
-	add	r5, r1, #VCPU_HDFAR
-	stmia	r5, {r2, r3, r4}
+2:	mrc	p15, 4, r2, c6, c0, 4	@ HPFAR
+	str	r2, [r1, #VCPU_HPFAR]
 
 1:	mov	r0, #ARM_EXCEPTION_HVC
 	b	__kvm_vcpu_return
@@ -474,15 +439,9 @@ switch_to_guest_vfp:
 
 	.align
 hyp_irq:
-	push	{r0}
-	load_vcpu	r0		@ Load VCPU pointer
-	add	r0, r0, #(VCPU_USR_REG(1))
-	stmia	r0, {r1-r12}
-	pop	{r0}
-	load_vcpu	r1		@ Load VCPU pointer again
-	str	r0, [r1, #VCPU_USR_REG(0)]
-
+	push	{r0, r1, r2}
 	mov	r0, #ARM_EXCEPTION_IRQ
+	load_vcpu	r1		@ Load VCPU pointer
 	b	__kvm_vcpu_return
 
 	.align
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 5b55cd6..25433fc 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -1,7 +1,6 @@
 #define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4))
 #define VCPU_USR_SP		(VCPU_USR_REG(13))
-#define VCPU_FIQ_REG(_reg_nr)	(VCPU_FIQ_REGS + (_reg_nr * 4))
-#define VCPU_FIQ_SPSR		(VCPU_FIQ_REG(7))
+#define VCPU_USR_LR		(VCPU_USR_REG(14))
 #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
 
 /* Clobbers {r2-r6} */
@@ -117,41 +116,33 @@
 	msr	ELR_hyp, r2
 .endm
 
-.macro store_mode_state base_reg, mode
-	.if \mode == usr
-	mrs	r2, SP_usr
-	mov	r3, lr
-	stmdb	\base_reg!, {r2, r3}
-	.elseif \mode != fiq
-	mrs	r2, SP_\mode
-	mrs	r3, LR_\mode
-	mrs	r4, SPSR_\mode
-	stmdb	\base_reg!, {r2, r3, r4}
-	.else
-	mrs	r2, r8_fiq
-	mrs	r3, r9_fiq
-	mrs	r4, r10_fiq
-	mrs	r5, r11_fiq
-	mrs	r6, r12_fiq
-	mrs	r7, SP_fiq
-	mrs	r8, LR_fiq
-	mrs	r9, SPSR_fiq
-	stmdb	\base_reg!, {r2-r9}
-	.endif
-.endm
-
-.macro load_mode_state base_reg, mode
-	.if \mode == usr
-	ldmia	\base_reg!, {r2, r3}
-	msr	SP_usr, r2
-	mov	lr, r3
-	.elseif \mode != fiq
-	ldmia	\base_reg!, {r2, r3, r4}
+/*
+ * Restore SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ * r0: VCPU address
+ * Clobbers r1, r2, r3, r4.
+ */
+.macro restore_guest_regs_mode mode, offset
+	add	r1, r0, \offset
+	ldm	r1, {r2, r3, r4}
 	msr	SP_\mode, r2
 	msr	LR_\mode, r3
 	msr	SPSR_\mode, r4
-	.else
-	ldmia	\base_reg!, {r2-r9}
+.endm
+
+/*
+ * Restore all guest registers.
+ * r0: VCPU address
+ * Clobbers *all* registers.
+ */
+.macro restore_guest_regs
+	restore_guest_regs_mode svc, VCPU_SVC_REGS
+	restore_guest_regs_mode abt, VCPU_ABT_REGS
+	restore_guest_regs_mode und, VCPU_UND_REGS
+	restore_guest_regs_mode irq, VCPU_IRQ_REGS
+
+	add	r1, r0, VCPU_FIQ_REGS
+	ldm	r1, {r2-r9}
 	msr	r8_fiq, r2
 	msr	r9_fiq, r3
 	msr	r10_fiq, r4
@@ -160,7 +151,65 @@
 	msr	SP_fiq, r7
 	msr	LR_fiq, r8
 	msr	SPSR_fiq, r9
-	.endif
+
+	@ Load return state
+	ldr	r2, [r0, #VCPU_PC]
+	ldr	r3, [r0, #VCPU_CPSR]
+	msr	ELR_hyp, r2
+	msr	SPSR_cxsf, r3
+
+	@ Load user registers
+	ldr	r2, [r0, #VCPU_USR_SP]
+	ldr	r3, [r0, #VCPU_USR_LR]
+	msr	SP_usr, r2
+	mov	lr, r3
+	add	r0, r0, #(VCPU_USR_REGS)
+	ldm	r0, {r0-r12}
+.endm
+
+/*
+ * Save SP, LR and SPSR for a given mode. offset is the offset of
+ * this mode's registers from the VCPU base.
+ * r1: VCPU address
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs_mode mode, offset
+	add	r2, r1, \offset
+	mrs	r3, SP_\mode
+	mrs	r4, LR_\mode
+	mrs	r5, SPSR_\mode
+	stm	r2, {r3, r4, r5}
+.endm
+
+/*
+ * Save all guest registers
+ * r1: VCPU address
+ * Expects guest's r0, r1, r2 on the stack.
+ * Clobbers r2, r3, r4, r5.
+ */
+.macro save_guest_regs
+	@ Store usr registers
+	add	r2, r1, #VCPU_USR_REG(3)
+	stm	r2, {r3-r12}
+	add	r2, r1, #VCPU_USR_REG(0)
+	pop	{r3, r4, r5}		@ r0, r1, r2
+	stm	r2, {r3, r4, r5}
+	mrs	r2, SP_usr
+	mov	r3, lr
+	str	r2, [r1, #VCPU_USR_SP]
+	str	r3, [r1, #VCPU_USR_LR]
+
+	@ Store return state
+	mrs	r2, ELR_hyp
+	mrs	r3, spsr
+	str	r2, [r1, #VCPU_PC]
+	str	r3, [r1, #VCPU_CPSR]
+
+	@ Store other guest registers
+	save_guest_regs_mode svc, VCPU_SVC_REGS
+	save_guest_regs_mode abt, VCPU_ABT_REGS
+	save_guest_regs_mode und, VCPU_UND_REGS
+	save_guest_regs_mode irq, VCPU_IRQ_REGS
 .endm
 
 /* Reads cp15 registers from hardware and stores them in memory
-- 
1.7.12



