[RFC PATCH 4/5] ARM: KVM: move usr regs to struct pt_regs

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Now that our world-switch is mostly layout independent, it becomes
relatively easy to move the usr_regs array to struct pt_regs.

This gives us a common abstraction with the rest of the kernel, and
makes it similar to what is being done on the arm64 side.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/include/asm/kvm.h         |  5 ++---
 arch/arm/include/asm/kvm_emulate.h | 12 ++++++------
 arch/arm/kernel/asm-offsets.c      |  4 ++--
 arch/arm/kvm/arm.c                 | 10 +++++-----
 arch/arm/kvm/coproc.c              |  4 ++--
 arch/arm/kvm/coproc.h              |  8 ++++----
 arch/arm/kvm/emulate.c             | 30 +++++++++++++-----------------
 arch/arm/kvm/guest.c               |  2 +-
 arch/arm/kvm/mmu.c                 |  6 +++---
 arch/arm/kvm/reset.c               |  2 +-
 10 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/arch/arm/include/asm/kvm.h b/arch/arm/include/asm/kvm.h
index 8101812..8c851e9 100644
--- a/arch/arm/include/asm/kvm.h
+++ b/arch/arm/include/asm/kvm.h
@@ -20,6 +20,7 @@
 #define __ARM_KVM_H__
 
 #include <asm/types.h>
+#include <asm/ptrace.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
@@ -28,14 +29,12 @@
 	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
 struct kvm_regs {
-	__u32 usr_regs[15];	/* R0_usr - R14_usr */
+	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
 	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
 	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
 	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
 	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
 	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
-	__u32 pc;		/* The program counter (r15) */
-	__u32 cpsr;		/* The guest CPSR */
 };
 
 /* Supported Processor Types */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 370a450..e543238 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -57,34 +57,34 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 /* Get vcpu register for current mode */
 static inline u32 *vcpu_reg(struct kvm_vcpu *vcpu, unsigned long reg_num)
 {
-	return vcpu_reg_mode(vcpu, reg_num, vcpu->arch.regs.cpsr);
+	return vcpu_reg_mode(vcpu, reg_num, vcpu->arch.regs.usr_regs.ARM_cpsr);
 }
 
 static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
 {
-	return vcpu_reg(vcpu, 15);
+	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
 }
 
 static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
 {
-	return &vcpu->arch.regs.cpsr;
+	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
 }
 
 /* Get vcpu SPSR for current mode */
 static inline u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
 {
-	return vcpu_spsr_mode(vcpu, vcpu->arch.regs.cpsr);
+	return vcpu_spsr_mode(vcpu, vcpu->arch.regs.usr_regs.ARM_cpsr);
 }
 
 static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
 }
 
 static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr_mode = vcpu->arch.regs.cpsr & MODE_MASK;
+	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
 	return cpsr_mode > USR_MODE;;
 }
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 1c4181e..ff3f43b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -158,8 +158,8 @@ int main(void)
   DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs));
   DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs));
   DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
-  DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.pc));
-  DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.cpsr));
+  DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
+  DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
   DEFINE(VCPU_IRQ_LINES,	offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
   DEFINE(VCPU_HDFAR,		offsetof(struct kvm_vcpu, arch.hdfar));
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 7f2ea3d..491ac38 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -470,8 +470,8 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	 * Guest called HVC instruction:
 	 * Let it know we don't want that by injecting an undefined exception.
 	 */
-	kvm_debug("hvc: %x (at %08x)", vcpu->arch.hsr & ((1 << 16) - 1),
-				     vcpu->arch.regs.pc);
+	kvm_debug("hvc: %x (at %08lx)", vcpu->arch.hsr & ((1 << 16) - 1),
+				     vcpu->arch.regs.usr_regs.ARM_pc);
 	kvm_debug("         HSR: %8x", vcpu->arch.hsr);
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -480,7 +480,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	/* We don't support SMC; don't do that. */
-	kvm_debug("smc: at %08x", vcpu->arch.regs.pc);
+	kvm_debug("smc: at %08lx", vcpu->arch.regs.usr_regs.ARM_pc);
 	kvm_inject_undefined(vcpu);
 	return 1;
 }
@@ -674,7 +674,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		/**************************************************************
 		 * Enter the guest
 		 */
-		trace_kvm_entry(vcpu->arch.regs.pc);
+		trace_kvm_entry(vcpu->arch.regs.usr_regs.ARM_pc);
 		kvm_guest_enter();
 		vcpu->mode = IN_GUEST_MODE;
 
@@ -689,7 +689,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
-		trace_kvm_exit(vcpu->arch.regs.pc);
+		trace_kvm_exit(vcpu->arch.regs.usr_regs.ARM_pc);
 		/*
 		 * We may have taken a host interrupt in HYP mode (ie
 		 * while executing the guest). This interrupt is still
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 8b658dd..2bee16f 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -294,8 +294,8 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		}
 		/* If access function fails, it should complain. */
 	} else {
-		kvm_err("Unsupported guest CP15 access at: %08x\n",
-			vcpu->arch.regs.pc);
+		kvm_err("Unsupported guest CP15 access at: %08lx\n",
+			vcpu->arch.regs.usr_regs.ARM_pc);
 		print_cp_instr(params);
 	}
 	kvm_inject_undefined(vcpu);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 4857cfe..0f4bf0b 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -84,8 +84,8 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 				      const struct coproc_params *params)
 {
-	kvm_debug("CP15 write to read-only register at: %08x\n",
-		  vcpu->arch.regs.pc);
+	kvm_debug("CP15 write to read-only register at: %08lx\n",
+		  vcpu->arch.regs.usr_regs.ARM_pc);
 	print_cp_instr(params);
 	return false;
 }
@@ -93,8 +93,8 @@ static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
 static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
 					const struct coproc_params *params)
 {
-	kvm_debug("CP15 read to write-only register at: %08x\n",
-		  vcpu->arch.regs.pc);
+	kvm_debug("CP15 read to write-only register at: %08lx\n",
+		  vcpu->arch.regs.usr_regs.ARM_pc);
 	print_cp_instr(params);
 	return false;
 }
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 19a3d78..bac59c7 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -28,7 +28,7 @@
 #define REG_OFFSET(_reg) \
 	(offsetof(struct kvm_regs, _reg) / sizeof(u32))
 
-#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs[_num])
+#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
 
 static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 	/* FIQ Registers */
@@ -43,7 +43,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		REG_OFFSET(fiq_regs[4]), /* r12 */
 		REG_OFFSET(fiq_regs[5]), /* r13 */
 		REG_OFFSET(fiq_regs[6]), /* r14 */
-		REG_OFFSET(pc)		 /* r15 */
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* IRQ Registers */
@@ -55,7 +55,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(12),
 		REG_OFFSET(irq_regs[0]), /* r13 */
 		REG_OFFSET(irq_regs[1]), /* r14 */
-		REG_OFFSET(pc)	         /* r15 */
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* SVC Registers */
@@ -67,7 +67,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(12),
 		REG_OFFSET(svc_regs[0]), /* r13 */
 		REG_OFFSET(svc_regs[1]), /* r14 */
-		REG_OFFSET(pc)		 /* r15 */
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* ABT Registers */
@@ -79,7 +79,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(12),
 		REG_OFFSET(abt_regs[0]), /* r13 */
 		REG_OFFSET(abt_regs[1]), /* r14 */
-		REG_OFFSET(pc)	         /* r15 */
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* UND Registers */
@@ -91,7 +91,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(12),
 		REG_OFFSET(und_regs[0]), /* r13 */
 		REG_OFFSET(und_regs[1]), /* r14 */
-		REG_OFFSET(pc)	         /* r15 */
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* USR Registers */
@@ -100,10 +100,8 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
 		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
 		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-		USR_REG_OFFSET(12),
-		REG_OFFSET(usr_regs[13]), /* r13 */
-		REG_OFFSET(usr_regs[14]), /* r14 */
-		REG_OFFSET(pc)	          /* r15 */
+		USR_REG_OFFSET(12), USR_REG_OFFSET(13),	USR_REG_OFFSET(14),
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 
 	/* SYS Registers */
@@ -112,10 +110,8 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
 		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
 		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
 		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-		USR_REG_OFFSET(12),
-		REG_OFFSET(usr_regs[13]), /* r13 */
-		REG_OFFSET(usr_regs[14]), /* r14 */
-		REG_OFFSET(pc)	          /* r15 */
+		USR_REG_OFFSET(12), USR_REG_OFFSET(13),	USR_REG_OFFSET(14),
+		REG_OFFSET(usr_regs.ARM_pc)
 	},
 };
 
@@ -223,7 +219,7 @@ static int kvm_instr_index(u32 instr, u32 table[][2], int table_entries)
  */
 int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	trace_kvm_wfi(vcpu->arch.regs.pc);
+	trace_kvm_wfi(vcpu->arch.regs.usr_regs.ARM_pc);
 	kvm_vcpu_block(vcpu);
 	return 1;
 }
@@ -366,7 +362,7 @@ static unsigned long ls_word_calc_offset(struct kvm_vcpu *vcpu,
 			break;
 		case SCALE_SHIFT_ROR_RRX:
 			if (shift_imm == 0) {
-				u32 C = (vcpu->arch.regs.cpsr &
+				u32 C = (vcpu->arch.regs.usr_regs.ARM_cpsr &
 						(1U << PSR_BIT_C));
 				offset = (C << 31) | offset >> 1;
 			} else {
@@ -628,7 +624,7 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 {
 	bool is_thumb;
 
-	trace_kvm_mmio_emulate(vcpu->arch.regs.pc, instr, vcpu->arch.regs.cpsr);
+	trace_kvm_mmio_emulate(vcpu->arch.regs.usr_regs.ARM_pc, instr, vcpu->arch.regs.usr_regs.ARM_cpsr);
 
 	mmio->phys_addr = fault_ipa;
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index de05c557c..65ae563 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -79,7 +79,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (get_user(val, uaddr) != 0)
 		return -EFAULT;
 
-	if (off == KVM_REG_ARM_CORE_REG(cpsr)) {
+	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
 		unsigned long mode = val & MODE_MASK;
 		switch (mode) {
 		case USR_MODE:
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 8f6761c..f955995 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -704,10 +704,10 @@ static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
 	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
 	instr_len = (is_thumb) ? 2 : 4;
 
-	BUG_ON(!is_thumb && vcpu->arch.regs.pc & 0x3);
+	BUG_ON(!is_thumb && vcpu->arch.regs.usr_regs.ARM_pc & 0x3);
 
 	/* Now guest isn't running, we can va->pa map and copy atomically. */
-	ret = copy_from_guest_va(vcpu, instr, vcpu->arch.regs.pc, instr_len,
+	ret = copy_from_guest_va(vcpu, instr, vcpu->arch.regs.usr_regs.ARM_pc, instr_len,
 				 vcpu_mode_priv(vcpu));
 	if (!ret)
 		goto out;
@@ -715,7 +715,7 @@ static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
 	/* A 32-bit thumb2 instruction can actually go over a page boundary! */
 	if (is_thumb && is_wide_instruction(*instr)) {
 		*instr = *instr << 16;
-		ret = copy_from_guest_va(vcpu, instr, vcpu->arch.regs.pc + 2, 2,
+		ret = copy_from_guest_va(vcpu, instr, vcpu->arch.regs.usr_regs.ARM_pc + 2, 2,
 					 vcpu_mode_priv(vcpu));
 	}
 
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index bb17def..67ca4a3 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -34,7 +34,7 @@
 static const int a15_max_cpu_idx = 3;
 
 static struct kvm_regs a15_regs_reset = {
-	.cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
+	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
 
 #ifdef CONFIG_KVM_ARM_TIMER
-- 
1.7.12



_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm


[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux