[PATCH 1/3] KVM: ARM: __kvm_va_to_pa()

Create a helper to do the guest va->pa mapping, then take that code out of
the exit path and do the work in normal C.

copy_from_guest_va() is a bit more generic than it needs to be, but
not much.

Signed-off-by: Rusty Russell <rusty.russell@xxxxxxxxxx>
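
For reviewers, a minimal C sketch of the PAR decoding the new code relies on
(not part of the patch; the helper name par_to_ipa() is invented purely for
illustration, and the masks are taken verbatim from copy_from_guest_va()
below; callers must check PAR bit 0, the translation abort flag, first):

	static phys_addr_t par_to_ipa(u64 par, unsigned long va)
	{
		phys_addr_t ipa;

		if (par & (1U << 11))	/* LPAE PAR format */
			ipa = par & PAGE_MASK & ((1ULL << 32) - 1);
		else			/* VMSAv7 PAR format */
			ipa = par & PAGE_MASK & ((1ULL << 40) - 1);

		/* The PAR only gives the page; add the page offset back in. */
		return ipa + (va & ~PAGE_MASK);
	}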

diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 85bd676..ee21cd2 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -54,6 +54,8 @@ extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern u64 __kvm_va_to_pa(struct kvm_vcpu *vcpu, u32 va, bool priv);
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 778d2af..79b5318 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -131,9 +131,6 @@ struct kvm_vcpu_arch {
 	u32 hdfar;		/* Hyp Data Fault Address Register */
 	u32 hifar;		/* Hyp Inst. Fault Address Register */
 	u32 hpfar;		/* Hyp IPA Fault Address Register */
-	u64 pc_ipa;		/* IPA for the current PC (VA to PA result) */
-	u64 pc_ipa2;		/* same as above, but for non-aligned wide thumb
-				   instructions */
 
 	/* Floating point registers (VFP and Advanced SIMD/NEON) */
 	struct vfp_hard_struct vfp_guest;
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index aca6c2c..bb18fd4 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -184,8 +184,6 @@ int main(void)
   DEFINE(VCPU_HDFAR,		offsetof(struct kvm_vcpu, arch.hdfar));
   DEFINE(VCPU_HIFAR,		offsetof(struct kvm_vcpu, arch.hifar));
   DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
-  DEFINE(VCPU_PC_IPA,		offsetof(struct kvm_vcpu, arch.pc_ipa));
-  DEFINE(VCPU_PC_IPA2,		offsetof(struct kvm_vcpu, arch.pc_ipa2));
   DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index b70394a..d282860 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -510,6 +510,37 @@ after_vfp_restore:
 	clrex				@ Clear exclusive monitor
 	bx	lr			@ return to IOCTL
 
+@ Arguments:
+@  r0: pointer to vcpu struct
+@  r1: virtual address to translate (rounded down to a page boundary)
+@  r2: 1 = privileged (PL1) read mapping, 0 = user (PL0) read mapping.
+@ Returns the 64-bit PAR value.
+ENTRY(__kvm_va_to_pa)
+	hvc	#0			@ switch to hyp-mode
+
+	push	{r4-r12}
+
+	@ Fold flag into r1, easier than using stack.
+	orr	r1, r1, r2
+
+	@ This swaps too many registers, but we're in the slow path anyway.
+	read_cp15_state
+	write_cp15_state 1, r0
+
+	ands	r2, r1, #1
+	sub	r1, r1, r2
+	mcrne	p15, 0, r1, c7, c8, 0	@ VA to PA, ATS1CPR
+	mcreq	p15, 0, r1, c7, c8, 2	@ VA to PA, ATS1CUR
+
+	@ Restore host state.
+	read_cp15_state 1, r0
+	write_cp15_state
+
+	mrrc	p15, 0, r0, r1, c7	@ PAR
+	pop	{r4-r12}
+	hvc	#0			@ Back to SVC
+	bx	lr
+
 @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
 @  Hypervisor exception vector and handlers
 @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@ -686,34 +717,6 @@ guest_trap:
 	cmpne	r2, #HSR_EC_DABT
 	bne	1f
 
-	@ For non-valid data aborts, get the offending instr. PA
-	lsr	r2, r0, #HSR_ISV_SHIFT
-	ands	r2, r2, #1
-	bne	2f
-	mrs	r3, ELR_hyp
-	mrs	r7, spsr
-	and	r7, r7, #0xf
-	cmp	r7, #0			@ fault happened in user mode?
-	mcreq	p15, 0, r3, c7, c8, 2	@ VA to PA, ATS1CUR
-	mcrne	p15, 0, r3, c7, c8, 0	@ VA to PA, ATS1CPR
-	mrrc	p15, 0, r4, r5, c7	@ PAR
-	add	r6, r1, #VCPU_PC_IPA
-	strd	r4, r5, [r6]
-
-	@ Check if we might have a wide thumb instruction spill-over
-	ldr	r5, =0xfff
-	bic	r4, r3, r5		@ clear page mask
-	sub	r5, r5, #1		@ last 2-byte page bounday, 0xffe
-	cmp	r4, r5
-	bne	2f
-	add	r4, r3, #2		@ _really_ unlikely!
-	cmp	r7, #0			@ fault happened in user mode?
-	mcreq	p15, 0, r4, c7, c8, 2	@ VA to PA, ATS1CUR
-	mcrne	p15, 0, r4, c7, c8, 0	@ VA to PA, ATS1CPR
-	mrrc	p15, 0, r4, r5, c7	@ PAR
-	add	r6, r1, #VCPU_PC_IPA2
-	strd	r4, r5, [r6]
-
 2:	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR
 	mrc	p15, 4, r3, c6, c0, 2	@ HIFAR
 	mrc	p15, 4, r4, c6, c0, 4	@ HPFAR
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3df4fa8..6909a83 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -580,6 +580,62 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /**
+ * copy_from_guest_va - copy memory from guest (very slow!)
+ * @vcpu:	vcpu pointer
+ * @dest:	memory to copy into
+ * @va:		virtual address in guest to copy from
+ * @len:	length to copy
+ * @priv:	use guest PL1 (i.e. kernel) mappings, otherwise use guest PL0 mappings.
+ *
+ * Returns 0 on success, or a negative errno.
+ */
+static int copy_from_guest_va(struct kvm_vcpu *vcpu,
+			      void *dest, unsigned long va, size_t len,
+			      bool priv)
+{
+	u64 par;
+	phys_addr_t pc_ipa;
+	size_t cur_len;
+	int err;
+
+	/* A 32-bit thumb2 instruction can actually go over a page boundary! */
+	do {
+		par = __kvm_va_to_pa(vcpu, va & PAGE_MASK, priv);
+		if (par & 1) {
+			kvm_err("I/O Abort from invalid instruction address? Wrong!\n");
+			return -EINVAL;
+		}
+
+		if (par & (1U << 11)) {
+			/* LPAE PAR format */
+			/*
+			 * TODO: Check if this ever happens
+			 * - called from Hyp mode
+			 */
+			pc_ipa = par & PAGE_MASK & ((1ULL << 32) - 1);
+		} else {
+			/* VMSAv7 PAR format */
+			pc_ipa = par & PAGE_MASK & ((1ULL << 40) - 1);
+		}
+		pc_ipa += va & ~PAGE_MASK;
+
+		/* Only copy up to end of page. */
+		cur_len = (pc_ipa & PAGE_MASK) + PAGE_SIZE - pc_ipa;
+		if (likely(cur_len > len))
+			cur_len = len;
+
+		err = kvm_read_guest(vcpu->kvm, pc_ipa, dest, cur_len);
+		if (unlikely(err))
+			return err;
+
+		len -= cur_len;
+		va += cur_len;
+	} while (unlikely(len != 0));
+
+	return 0;
+}
+
+/**
  * invalid_io_mem_abort -- Handle I/O aborts ISV bit is clear
  *
  * @vcpu:      The vcpu pointer
@@ -593,22 +649,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int invalid_io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
 	unsigned long instr;
-	phys_addr_t pc_ipa;
-
-	if (vcpu->arch.pc_ipa & 1) {
-		kvm_err("I/O Abort from invalid instruction address? Wrong!\n");
-		return -EINVAL;
-	}
-
-	if (vcpu->arch.pc_ipa & (1U << 11)) {
-		/* LPAE PAR format */
-		/* TODO: Check if this ever happens - called from Hyp mode */
-		pc_ipa = vcpu->arch.pc_ipa & PAGE_MASK & ((1ULL << 32) - 1);
-	} else {
-		/* VMSAv7 PAR format */
-		pc_ipa = vcpu->arch.pc_ipa & PAGE_MASK & ((1ULL << 40) - 1);
-	}
-	pc_ipa += vcpu->arch.regs.pc & ~PAGE_MASK;
+	int err;
 
 	if (vcpu->arch.regs.cpsr & PSR_T_BIT) {
 		/* TODO: Check validity of PC IPA and IPA2!!! */
@@ -617,9 +658,11 @@ static int invalid_io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 		return -EINVAL;
 	}
 
-	if (kvm_read_guest(vcpu->kvm, pc_ipa, &instr, sizeof(instr))) {
+	err = copy_from_guest_va(vcpu, &instr, vcpu->arch.regs.pc, sizeof(instr),
+				 vcpu_mode_priv(vcpu));
+	if (err) {
 		kvm_err("Could not copy guest instruction\n");
-		return -EFAULT;
+		return err;
 	}
 
 	return kvm_emulate_mmio_ls(vcpu, fault_ipa, instr);
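
For illustration (not part of the patch): with 4K pages, a 4-byte Thumb-2
fetch starting at va = 0x8000_0ffe takes two iterations of the
copy_from_guest_va() loop.  The first pass computes
cur_len = (pc_ipa & PAGE_MASK) + PAGE_SIZE - pc_ipa = 2 and copies the two
bytes up to the end of the page; the second pass translates the next page
and copies the remaining two bytes.  That is the wide-thumb spill-over case
the removed assembly handled separately via pc_ipa2.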