[PATCH] KVM: ARM: Rework world-switch assembly path

Always use r0 to store the vcpu pointer, rename the macro parameters to
be more meaningful, and use named parameters at the call sites to make
the intent of each invocation explicit.
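
For reference, the patch relies on two assembler idioms: a register
alias (vcpu as a symbolic name for r0, via .req) and named macro
parameters compared against .equ constants. A minimal sketch, using a
hypothetical set_trap_example macro and a made-up offset rather than
the real macros from this patch:

	vcpu	.req	r0		@ symbolic alias: vcpu pointer lives in r0

	.equ	vmentry, 0
	.equ	vmexit,  1

	@ hypothetical macro: takes a named parameter instead of a bare 0/1
	.macro	set_trap_example operation
	.if	\operation == vmentry
	orr	r2, r2, r3		@ set trap bits on guest entry
	.else
	bic	r2, r2, r3		@ clear trap bits on guest exit
	.endif
	.endm

	@ call sites name the argument, so the intent is self-documenting:
	set_trap_example operation = vmentry
	ldr	r1, [vcpu, #0]		@ the alias reads like any other register

Note that restore_guest_regs ends with an ldm over r0-r12, which
clobbers the alias; this is why the vcpu pointer is stashed in HTPIDR
on entry and fetched back with load_vcpu after an exit.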

Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Marc Zyngier <marc.zyngier@xxxxxxx>
Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
---
 arch/arm/kvm/interrupts.S      |  113 ++++++++---------
 arch/arm/kvm/interrupts_head.S |  262 +++++++++++++++++++++++-----------------
 2 files changed, 210 insertions(+), 165 deletions(-)

diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 91bb9c5..3b3ab73 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -90,16 +90,16 @@ ENDPROC(__kvm_flush_vm_context)
  */
 ENTRY(__kvm_vcpu_run)
 	@ Save the vcpu pointer
-	mcr	p15, 4, r0, c13, c0, 2	@ HTPIDR
+	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
 
 	save_host_regs
 
-	restore_vgic_state r0
-	restore_timer_state r0
+	restore_vgic_state
+	restore_timer_state
 
 	@ Store hardware CP15 state and load guest state
-	read_cp15_state
-	write_cp15_state 1, r0
+	read_cp15_state store_to_vcpu = 0
+	write_cp15_state read_from_vcpu = 1
 
 	@ If the host kernel has not been configured with VFPv3 support,
 	@ then it is safer to deny guests the use of it as well.
@@ -112,28 +112,28 @@ ENTRY(__kvm_vcpu_run)
 #endif
 
 	@ Configure Hyp-role
-	configure_hyp_role 1, r0
+	configure_hyp_role vmentry
 
 	@ Trap coprocessor CRx accesses
-	set_hstr 1
-	set_hcptr 1, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
-	set_hdcr 1
+	set_hstr vmentry
+	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hdcr vmentry
 
 	@ Write configured ID register into MIDR alias
-	ldr	r1, [r0, #VCPU_MIDR]
+	ldr	r1, [vcpu, #VCPU_MIDR]
 	mcr	p15, 4, r1, c0, c0, 0
 
 	@ Write guest view of MPIDR into VMPIDR
-	ldr	r1, [r0, #CP15_OFFSET(c0_MPIDR)]
+	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
 	mcr	p15, 4, r1, c0, c0, 5
 
 	@ Set up guest memory translation
-	ldr	r1, [r0, #VCPU_KVM]
+	ldr	r1, [vcpu, #VCPU_KVM]
 	add	r1, r1, #KVM_VTTBR
 	ldrd	r2, r3, [r1]
 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
 
-	@ At this point, r0 must contain the pointer to the VCPU
+	@ We're all done, just restore the GPRs and go to the guest
 	restore_guest_regs
 	clrex				@ Clear exclusive monitor
 	eret
@@ -141,9 +141,9 @@ ENTRY(__kvm_vcpu_run)
 __kvm_vcpu_return:
 	/*
 	 * return convention:
-	 * vcpu r0, r1, r2 saved on the stack
-	 * r0: exception code
-	 * r1: vcpu pointer
+	 * guest r0, r1, r2 saved on the stack
+	 * r0: vcpu pointer
+	 * r1: exception code
 	 */
 	save_guest_regs
 
@@ -153,9 +153,9 @@ __kvm_vcpu_return:
 	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
 
 	@ Don't trap coprocessor accesses for host kernel
-	set_hstr 0
-	set_hdcr 0
-	set_hcptr 0, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hstr vmexit
+	set_hdcr vmexit
+	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
 
 #ifdef CONFIG_VFPv3
 	@ Save floating point registers if we let the guest use them.
@@ -163,9 +163,9 @@ __kvm_vcpu_return:
 	bne	after_vfp_restore
 
 	@ Switch VFP/NEON hardware state to the host's
-	add	r7, r1, #VCPU_VFP_GUEST
+	add	r7, vcpu, #VCPU_VFP_GUEST
 	store_vfp_state r7
-	add	r7, r1, #VCPU_VFP_HOST
+	add	r7, vcpu, #VCPU_VFP_HOST
 	ldr	r7, [r7]
 	restore_vfp_state r7
 
@@ -176,7 +176,7 @@ after_vfp_restore:
 #endif
 
 	@ Reset Hyp-role
-	configure_hyp_role 0, r1
+	configure_hyp_role vmexit
 
 	@ Let host read hardware MIDR
 	mrc	p15, 0, r2, c0, c0, 0
@@ -187,14 +187,15 @@ after_vfp_restore:
 	mcr	p15, 4, r2, c0, c0, 5
 
 	@ Store guest CP15 state and restore host state
-	read_cp15_state 1, r1
-	write_cp15_state
+	read_cp15_state store_to_vcpu = 1
+	write_cp15_state read_from_vcpu = 0
 
-	save_timer_state r1
-	save_vgic_state	r1
+	save_timer_state
+	save_vgic_state
 
 	restore_host_regs
 	clrex				@ Clear exclusive monitor
+	mov	r0, r1			@ Return the return code
 	bx	lr			@ return to IOCTL
 
 
@@ -218,8 +219,8 @@ ENTRY(__kvm_va_to_pa)
 	orr	r1, r1, r2
 
 	@ This swaps too many registers, but we're in the slow path anyway.
-	read_cp15_state
-	write_cp15_state 1, r0
+	read_cp15_state store_to_vcpu = 0
+	write_cp15_state read_from_vcpu = 1
 
 	ands	r2, r1, #1
 	bic	r1, r1, r2
@@ -228,8 +229,8 @@ ENTRY(__kvm_va_to_pa)
 	isb
 
 	@ Restore host state.
-	read_cp15_state 1, r0
-	write_cp15_state
+	read_cp15_state store_to_vcpu = 1
+	write_cp15_state read_from_vcpu = 0
 
 	mrrc	p15, 0, r0, r1, c7	@ PAR
 	pop	{r4-r12}
@@ -277,20 +278,20 @@ ENTRY(kvm_call_hyp)
 	ands	r1, r1, #0xff
 	beq	99f
 
-	load_vcpu	r1		@ Load VCPU pointer
+	load_vcpu			@ Load VCPU pointer
 	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
 	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r0, c6, c0, 0	@ HDFAR
-	str	r2, [r1, #VCPU_HSR]
-	str	r0, [r1, #VCPU_HxFAR]
+	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
+	str	r2, [r0, #VCPU_HSR]
+	str	r1, [r0, #VCPU_HxFAR]
 	.endif
 	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
 	mrc	p15, 4, r2, c5, c2, 0	@ HSR
-	mrc	p15, 4, r0, c6, c0, 2	@ HIFAR
-	str	r2, [r1, #VCPU_HSR]
-	str	r0, [r1, #VCPU_HxFAR]
+	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
+	str	r2, [r0, #VCPU_HSR]
+	str	r1, [r0, #VCPU_HxFAR]
 	.endif
-	mov	r0, #\exception_code
+	mov	r1, #\exception_code
 	b	__kvm_vcpu_return
 
 	@ We were in the host already. Let's craft a panicking return to SVC.
@@ -351,20 +352,20 @@ hyp_hvc:
 	push	{r0, r1, r2}
 
 	@ Check syndrome register
-	mrc	p15, 4, r0, c5, c2, 0	@ HSR
-	lsr	r1, r0, #HSR_EC_SHIFT
+	mrc	p15, 4, r1, c5, c2, 0	@ HSR
+	lsr	r0, r1, #HSR_EC_SHIFT
 #ifdef CONFIG_VFPv3
-	cmp	r1, #HSR_EC_CP_0_13
+	cmp	r0, #HSR_EC_CP_0_13
 	beq	switch_to_guest_vfp
 #endif
-	cmp	r1, #HSR_EC_HVC
+	cmp	r0, #HSR_EC_HVC
 	bne	guest_trap		@ Not HVC instr.
 
 	/*
 	 * Let's check if the HVC came from VMID 0 and allow simple
 	 * switch to Hyp mode
 	 */
-	mrrc    p15, 6, r1, r2, c2
+	mrrc    p15, 6, r0, r2, c2
 	lsr     r2, r2, #16
 	and     r2, r2, #0xff
 	cmp     r2, #0
@@ -391,19 +392,19 @@ THUMB(	orr	lr, #1)
 	eret
 
 guest_trap:
-	load_vcpu	r1		@ Load VCPU pointer
-	str	r0, [r1, #VCPU_HSR]
+	load_vcpu			@ Load VCPU pointer to r0
+	str	r1, [vcpu, #VCPU_HSR]
 
 	@ Check if we need the fault information
-	lsr	r0, r0, #HSR_EC_SHIFT
-	cmp	r0, #HSR_EC_IABT
+	lsr	r1, r1, #HSR_EC_SHIFT
+	cmp	r1, #HSR_EC_IABT
 	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
 	beq	2f
-	cmp	r0, #HSR_EC_DABT
+	cmp	r1, #HSR_EC_DABT
 	bne	1f
 	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR
 
-2:	str	r2, [r1, #VCPU_HxFAR]
+2:	str	r2, [vcpu, #VCPU_HxFAR]
 
 	/*
 	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
@@ -436,10 +437,10 @@ guest_trap:
 	lsl	r2, r2, #4
 	orr	r2, r2, r1, lsl #24
 
-3:	load_vcpu	r1
-	str	r2, [r1, #VCPU_HPFAR]
+3:	load_vcpu			@ Load VCPU pointer to r0
+	str	r2, [r0, #VCPU_HPFAR]
 
-1:	mov	r0, #ARM_EXCEPTION_HVC
+1:	mov	r1, #ARM_EXCEPTION_HVC
 	b	__kvm_vcpu_return
 
 4:	pop	{r0, r1, r2}		@ Failed translation, return to guest
@@ -453,11 +454,11 @@ guest_trap:
  */
 #ifdef CONFIG_VFPv3
 switch_to_guest_vfp:
-	load_vcpu	r0		@ Load VCPU pointer
+	load_vcpu			@ Load VCPU pointer to r0
 	push	{r3-r7}
 
 	@ NEON/VFP used.  Turn on VFP access.
-	set_hcptr 0, (HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
 
 	@ Switch VFP/NEON hardware state to the guest's
 	add	r7, r0, #VCPU_VFP_HOST
@@ -474,8 +475,8 @@ switch_to_guest_vfp:
 	.align
 hyp_irq:
 	push	{r0, r1, r2}
-	mov	r0, #ARM_EXCEPTION_IRQ
-	load_vcpu	r1		@ Load VCPU pointer
+	mov	r1, #ARM_EXCEPTION_IRQ
+	load_vcpu			@ Load VCPU pointer to r0
 	b	__kvm_vcpu_return
 
 	.align
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index ece84d1..94a9ee1 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -3,6 +3,12 @@
 #define VCPU_USR_LR		(VCPU_USR_REG(14))
 #define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
 
+/*
+ * Many of these macros need to access the VCPU structure, which is always
+ * held in r0.
+ */
+vcpu	.req	r0		@ vcpu pointer always in r0
+
 /* Clobbers {r2-r6} */
 .macro store_vfp_state vfp_base
 	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
@@ -56,6 +62,10 @@
 	push	{r2, r3, r4}
 .endm
 
+/*
+ * Store all host persistent registers on the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
 .macro save_host_regs
 	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
 	mrs	r2, ELR_hyp
@@ -91,6 +101,10 @@
 	msr	SPSR_\mode, r4
 .endm
 
+/*
+ * Restore all host registers from the stack.
+ * Clobbers all registers, in all modes, except r0 and r1.
+ */
 .macro restore_host_regs
 	pop	{r2-r9}
 	msr	r8_fiq, r2
@@ -119,11 +133,13 @@
 /*
  * Restore SP, LR and SPSR for a given mode. offset is the offset of
  * this mode's registers from the VCPU base.
- * r0: VCPU address
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
  * Clobbers r1, r2, r3, r4.
  */
 .macro restore_guest_regs_mode mode, offset
-	add	r1, r0, \offset
+	add	r1, vcpu, \offset
 	ldm	r1, {r2, r3, r4}
 	msr	SP_\mode, r2
 	msr	LR_\mode, r3
@@ -131,8 +147,10 @@
 .endm
 
 /*
- * Restore all guest registers.
- * r0: VCPU address
+ * Restore all guest registers from the vcpu struct.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
  * Clobbers *all* registers.
  */
 .macro restore_guest_regs
@@ -141,7 +159,7 @@
 	restore_guest_regs_mode und, #VCPU_UND_REGS
 	restore_guest_regs_mode irq, #VCPU_IRQ_REGS
 
-	add	r1, r0, #VCPU_FIQ_REGS
+	add	r1, vcpu, #VCPU_FIQ_REGS
 	ldm	r1, {r2-r9}
 	msr	r8_fiq, r2
 	msr	r9_fiq, r3
@@ -153,28 +171,30 @@
 	msr	SPSR_fiq, r9
 
 	@ Load return state
-	ldr	r2, [r0, #VCPU_PC]
-	ldr	r3, [r0, #VCPU_CPSR]
+	ldr	r2, [vcpu, #VCPU_PC]
+	ldr	r3, [vcpu, #VCPU_CPSR]
 	msr	ELR_hyp, r2
 	msr	SPSR_cxsf, r3
 
 	@ Load user registers
-	ldr	r2, [r0, #VCPU_USR_SP]
-	ldr	r3, [r0, #VCPU_USR_LR]
+	ldr	r2, [vcpu, #VCPU_USR_SP]
+	ldr	r3, [vcpu, #VCPU_USR_LR]
 	msr	SP_usr, r2
 	mov	lr, r3
-	add	r0, r0, #(VCPU_USR_REGS)
-	ldm	r0, {r0-r12}
+	add	vcpu, vcpu, #(VCPU_USR_REGS)
+	ldm	vcpu, {r0-r12}
 .endm
 
 /*
  * Save SP, LR and SPSR for a given mode. offset is the offset of
  * this mode's registers from the VCPU base.
- * r1: VCPU address
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
  * Clobbers r2, r3, r4, r5.
  */
 .macro save_guest_regs_mode mode, offset
-	add	r2, r1, \offset
+	add	r2, vcpu, \offset
 	mrs	r3, SP_\mode
 	mrs	r4, LR_\mode
 	mrs	r5, SPSR_\mode
@@ -182,28 +202,30 @@
 .endm
 
 /*
- * Save all guest registers
- * r1: VCPU address
+ * Save all guest registers to the vcpu struct
  * Expects guest's r0, r1, r2 on the stack.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
  * Clobbers r2, r3, r4, r5.
  */
 .macro save_guest_regs
 	@ Store usr registers
-	add	r2, r1, #VCPU_USR_REG(3)
+	add	r2, vcpu, #VCPU_USR_REG(3)
 	stm	r2, {r3-r12}
-	add	r2, r1, #VCPU_USR_REG(0)
+	add	r2, vcpu, #VCPU_USR_REG(0)
 	pop	{r3, r4, r5}		@ r0, r1, r2
 	stm	r2, {r3, r4, r5}
 	mrs	r2, SP_usr
 	mov	r3, lr
-	str	r2, [r1, #VCPU_USR_SP]
-	str	r3, [r1, #VCPU_USR_LR]
+	str	r2, [vcpu, #VCPU_USR_SP]
+	str	r3, [vcpu, #VCPU_USR_LR]
 
 	@ Store return state
 	mrs	r2, ELR_hyp
 	mrs	r3, spsr
-	str	r2, [r1, #VCPU_PC]
-	str	r3, [r1, #VCPU_CPSR]
+	str	r2, [vcpu, #VCPU_PC]
+	str	r3, [vcpu, #VCPU_CPSR]
 
 	@ Store other guest registers
 	save_guest_regs_mode svc, #VCPU_SVC_REGS
@@ -213,11 +235,14 @@
 .endm
 
 /* Reads cp15 registers from hardware and stores them in memory
- * @vcpu:   If 0, registers are written in-order to the stack,
- * 	    otherwise to the VCPU struct pointed to by vcpup
- * @vcpup:  Register pointing to VCPU struct
+ * @store_to_vcpu: If 0, registers are written in-order to the stack,
+ * 		   otherwise to the VCPU struct pointed to by the vcpu register
+ *
+ * Assumes vcpu pointer in vcpu reg
+ *
+ * Clobbers r2 - r12
  */
-.macro read_cp15_state vcpu=0, vcpup
+.macro read_cp15_state store_to_vcpu
 	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR
 	mrc	p15, 0, r3, c1, c0, 2	@ CPACR
 	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR
@@ -228,21 +253,21 @@
 	mrc	p15, 0, r11, c10, c2, 1	@ NMRR
 	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR
 
-	.if \vcpu == 0
+	.if \store_to_vcpu == 0
 	push	{r2-r12}		@ Push CP15 registers
 	.else
-	str	r2, [\vcpup, #CP15_OFFSET(c1_SCTLR)]
-	str	r3, [\vcpup, #CP15_OFFSET(c1_CPACR)]
-	str	r4, [\vcpup, #CP15_OFFSET(c2_TTBCR)]
-	str	r5, [\vcpup, #CP15_OFFSET(c3_DACR)]
-	add	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR0)
-	strd	r6, r7, [\vcpup]
-	add	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR1) - CP15_OFFSET(c2_TTBR0)
-	strd	r8, r9, [\vcpup]
-	sub	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR1)
-	str	r10, [\vcpup, #CP15_OFFSET(c10_PRRR)]
-	str	r11, [\vcpup, #CP15_OFFSET(c10_NMRR)]
-	str	r12, [\vcpup, #CP15_OFFSET(c0_CSSELR)]
+	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+	add	vcpu, vcpu, #CP15_OFFSET(c2_TTBR0)
+	strd	r6, r7, [vcpu]
+	add	vcpu, vcpu, #CP15_OFFSET(c2_TTBR1) - CP15_OFFSET(c2_TTBR0)
+	strd	r8, r9, [vcpu]
+	sub	vcpu, vcpu, #CP15_OFFSET(c2_TTBR1)
+	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
 	.endif
 
 	mrc	p15, 0, r2, c13, c0, 1	@ CID
@@ -257,43 +282,45 @@
 	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
 	mrc	p15, 0, r12, c12, c0, 0	@ VBAR
 
-	.if \vcpu == 0
+	.if \store_to_vcpu == 0
 	push	{r2-r12}		@ Push CP15 registers
 	.else
-	str	r2, [\vcpup, #CP15_OFFSET(c13_CID)]
-	str	r3, [\vcpup, #CP15_OFFSET(c13_TID_URW)]
-	str	r4, [\vcpup, #CP15_OFFSET(c13_TID_URO)]
-	str	r5, [\vcpup, #CP15_OFFSET(c13_TID_PRIV)]
-	str	r6, [\vcpup, #CP15_OFFSET(c5_DFSR)]
-	str	r7, [\vcpup, #CP15_OFFSET(c5_IFSR)]
-	str	r8, [\vcpup, #CP15_OFFSET(c5_ADFSR)]
-	str	r9, [\vcpup, #CP15_OFFSET(c5_AIFSR)]
-	str	r10, [\vcpup, #CP15_OFFSET(c6_DFAR)]
-	str	r11, [\vcpup, #CP15_OFFSET(c6_IFAR)]
-	str	r12, [\vcpup, #CP15_OFFSET(c12_VBAR)]
+	str	r2, [vcpu, #CP15_OFFSET(c13_CID)]
+	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
 	.endif
 .endm
 
-/* Reads cp15 registers from memory and writes them to hardware
- * @vcpu:   If 0, registers are read in-order from the stack,
- * 	    otherwise from the VCPU struct pointed to by vcpup
- * @vcpup:  Register pointing to VCPU struct
+/*
+ * Reads cp15 registers from memory and writes them to hardware
+ * @read_from_vcpu: If 0, registers are read in-order from the stack,
+ *		    otherwise from the VCPU struct pointed to by the vcpu register
+ *
+ * Assumes vcpu pointer in vcpu reg
  */
-.macro write_cp15_state vcpu=0, vcpup
-	.if \vcpu == 0
+.macro write_cp15_state read_from_vcpu
+	.if \read_from_vcpu == 0
 	pop	{r2-r12}
 	.else
-	ldr	r2, [\vcpup, #CP15_OFFSET(c13_CID)]
-	ldr	r3, [\vcpup, #CP15_OFFSET(c13_TID_URW)]
-	ldr	r4, [\vcpup, #CP15_OFFSET(c13_TID_URO)]
-	ldr	r5, [\vcpup, #CP15_OFFSET(c13_TID_PRIV)]
-	ldr	r6, [\vcpup, #CP15_OFFSET(c5_DFSR)]
-	ldr	r7, [\vcpup, #CP15_OFFSET(c5_IFSR)]
-	ldr	r8, [\vcpup, #CP15_OFFSET(c5_ADFSR)]
-	ldr	r9, [\vcpup, #CP15_OFFSET(c5_AIFSR)]
-	ldr	r10, [\vcpup, #CP15_OFFSET(c6_DFAR)]
-	ldr	r11, [\vcpup, #CP15_OFFSET(c6_IFAR)]
-	ldr	r12, [\vcpup, #CP15_OFFSET(c12_VBAR)]
+	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
+	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
+	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
+	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
+	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
+	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
+	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
+	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
+	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
+	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
+	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
 	.endif
 
 	mcr	p15, 0, r2, c13, c0, 1	@ CID
@@ -308,21 +335,21 @@
 	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
 	mcr	p15, 0, r12, c12, c0, 0	@ VBAR
 
-	.if \vcpu == 0
+	.if \read_from_vcpu == 0
 	pop	{r2-r12}
 	.else
-	ldr	r2, [\vcpup, #CP15_OFFSET(c1_SCTLR)]
-	ldr	r3, [\vcpup, #CP15_OFFSET(c1_CPACR)]
-	ldr	r4, [\vcpup, #CP15_OFFSET(c2_TTBCR)]
-	ldr	r5, [\vcpup, #CP15_OFFSET(c3_DACR)]
-	add	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR0)
-	ldrd	r6, r7, [\vcpup]
-	add	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR1) - CP15_OFFSET(c2_TTBR0)
-	ldrd	r8, r9, [\vcpup]
-	sub	\vcpup, \vcpup, #CP15_OFFSET(c2_TTBR1)
-	ldr	r10, [\vcpup, #CP15_OFFSET(c10_PRRR)]
-	ldr	r11, [\vcpup, #CP15_OFFSET(c10_NMRR)]
-	ldr	r12, [\vcpup, #CP15_OFFSET(c0_CSSELR)]
+	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
+	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
+	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
+	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
+	add	vcpu, vcpu, #CP15_OFFSET(c2_TTBR0)
+	ldrd	r6, r7, [vcpu]
+	add	vcpu, vcpu, #CP15_OFFSET(c2_TTBR1) - CP15_OFFSET(c2_TTBR0)
+	ldrd	r8, r9, [vcpu]
+	sub	vcpu, vcpu, #CP15_OFFSET(c2_TTBR1)
+	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
+	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
+	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
 	.endif
 
 	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
@@ -338,18 +365,19 @@
 
 /*
  * Save the VGIC CPU state into memory
- * @vcpup: Register pointing to VCPU struct
+ *
+ * Assumes vcpu pointer in vcpu reg
  */
-.macro save_vgic_state	vcpup
+.macro save_vgic_state
 #ifdef CONFIG_KVM_ARM_VGIC
 	/* Get VGIC VCTRL base into r2 */
-	ldr	r2, [\vcpup, #VCPU_KVM]
+	ldr	r2, [vcpu, #VCPU_KVM]
 	ldr	r2, [r2, #KVM_VGIC_VCTRL]
 	cmp	r2, #0
 	beq	2f
 
 	/* Compute the address of struct vgic_cpu */
-	add	r11, \vcpup, #VCPU_VGIC_CPU
+	add	r11, vcpu, #VCPU_VGIC_CPU
 
 	/* Save all interesting registers */
 	ldr	r3, [r2, #GICH_HCR]
@@ -384,18 +412,19 @@
 
 /*
  * Restore the VGIC CPU state from memory
- * @vcpup: Register pointing to VCPU struct
+ *
+ * Assumes vcpu pointer in vcpu reg
  */
-.macro restore_vgic_state	vcpup
+.macro restore_vgic_state
 #ifdef CONFIG_KVM_ARM_VGIC
 	/* Get VGIC VCTRL base into r2 */
-	ldr	r2, [\vcpup, #VCPU_KVM]
+	ldr	r2, [vcpu, #VCPU_KVM]
 	ldr	r2, [r2, #KVM_VGIC_VCTRL]
 	cmp	r2, #0
 	beq	2f
 
 	/* Compute the address of struct vgic_cpu */
-	add	r11, \vcpup, #VCPU_VGIC_CPU
+	add	r11, vcpu, #VCPU_VGIC_CPU
 
 	/* We only restore a minimal set of registers */
 	ldr	r3, [r11, #VGIC_CPU_HCR]
@@ -421,23 +450,29 @@
 #define CNTHCTL_PL1PCTEN	(1 << 0)
 #define CNTHCTL_PL1PCEN		(1 << 1)
 
-.macro save_timer_state	vcpup
+/*
+ * Save the timer state onto the VCPU and allow physical timer/counter access
+ * for the host.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro save_timer_state
 #ifdef CONFIG_KVM_ARM_TIMER
-	ldr	r4, [\vcpup, #VCPU_KVM]
+	ldr	r4, [vcpu, #VCPU_KVM]
 	ldr	r2, [r4, #KVM_TIMER_ENABLED]
 	cmp	r2, #0
 	beq	1f
 
 	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	and	r2, #3
-	str	r2, [\vcpup, #VCPU_TIMER_CNTV_CTL]
+	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
 	bic	r2, #1			@ Clear ENABLE
 	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	isb
 
 	mrrc	p15, 3, r2, r3, c14	@ CNTV_CVAL
-	str	r3, [\vcpup, #VCPU_TIMER_CNTV_CVALH]
-	str	r2, [\vcpup, #VCPU_TIMER_CNTV_CVALL]
+	str	r3, [vcpu, #VCPU_TIMER_CNTV_CVALH]
+	str	r2, [vcpu, #VCPU_TIMER_CNTV_CVALL]
 
 1:
 #endif
@@ -447,7 +482,13 @@
 	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
 .endm
 
-.macro restore_timer_state vcpup
+/*
+ * Load the timer state from the VCPU and deny physical timer/counter access
+ * for the host.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ */
+.macro restore_timer_state
 	@ Disallow physical timer access for the guest
 	@ Physical counter access is allowed
 	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
@@ -456,7 +497,7 @@
 	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
 
 #ifdef CONFIG_KVM_ARM_TIMER
-	ldr	r4, [\vcpup, #VCPU_KVM]
+	ldr	r4, [vcpu, #VCPU_KVM]
 	ldr	r2, [r4, #KVM_TIMER_ENABLED]
 	cmp	r2, #0
 	beq	1f
@@ -466,11 +507,11 @@
 	mcrr	p15, 4, r2, r3, c14	@ CNTVOFF
 	isb
 
-	ldr	r3, [\vcpup, #VCPU_TIMER_CNTV_CVALH]
-	ldr	r2, [\vcpup, #VCPU_TIMER_CNTV_CVALL]
+	ldr	r3, [vcpu, #VCPU_TIMER_CNTV_CVALH]
+	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CVALL]
 	mcrr	p15, 3, r2, r3, c14	@ CNTV_CVAL
 
-	ldr	r2, [\vcpup, #VCPU_TIMER_CNTV_CTL]
+	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
 	and	r2, #3
 	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
 	isb
@@ -478,12 +519,15 @@
 #endif
 .endm
 
+.equ vmentry,	0
+.equ vmexit,	1
+
 /* Configures the HSTR (Hyp System Trap Register) on entry/return
  * (hardware reset value is 0) */
-.macro set_hstr entry
+.macro set_hstr operation
 	mrc	p15, 4, r2, c1, c1, 3
 	ldr	r3, =HSTR_T(15)
-	.if \entry == 1
+	.if \operation == vmentry
 	orr	r2, r2, r3		@ Trap CR{15}
 	.else
 	bic	r2, r2, r3		@ Don't trap any CRx accesses
@@ -493,10 +537,10 @@
 
 /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
  * (hardware reset value is 0). Keep previous value in r2. */
-.macro set_hcptr entry, mask
+.macro set_hcptr operation, mask
 	mrc	p15, 4, r2, c1, c1, 2
 	ldr	r3, =\mask
-	.if \entry == 1
+	.if \operation == vmentry
 	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
 	.else
 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
@@ -506,10 +550,10 @@
 
 /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
  * (hardware reset value is 0) */
-.macro set_hdcr entry
+.macro set_hdcr operation
 	mrc	p15, 4, r2, c1, c1, 1
 	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
-	.if \entry == 1
+	.if \operation == vmentry
 	orr	r2, r2, r3		@ Trap some perfmon accesses
 	.else
 	bic	r2, r2, r3		@ Don't trap any perfmon accesses
@@ -518,13 +562,13 @@
 .endm
 
 /* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
-.macro configure_hyp_role entry, vcpu_ptr
+.macro configure_hyp_role operation
 	mrc	p15, 4, r2, c1, c1, 0	@ HCR
 	bic	r2, r2, #HCR_VIRT_EXCP_MASK
 	ldr	r3, =HCR_GUEST_MASK
-	.if \entry == 1
+	.if \operation == vmentry
 	orr	r2, r2, r3
-	ldr	r3, [\vcpu_ptr, #VCPU_IRQ_LINES]
+	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
 	orr	r2, r2, r3
 	.else
 	bic	r2, r2, r3
@@ -532,6 +576,6 @@
 	mcr	p15, 4, r2, c1, c1, 0
 .endm
 
-.macro load_vcpu reg
-	mrc	p15, 4, \reg, c13, c0, 2	@ HTPIDR
+.macro load_vcpu
+	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
 .endm
-- 
1.7.9.5
