[Android-virt] [PATCH 5/6] ARM: KVM: Add support for IO mapping at the guest level

Moving to the VGIC implies giving the guest access to some I/O devices
(the VGIC virtual CPU interface registers).

Define kvm_phys_addr_ioremap() to map a device at a guest IPA.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm/include/asm/kvm_mmu.h |    2 ++
 arch/arm/kvm/mmu.c             |   36 ++++++++++++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 2 deletions(-)
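
For context, a minimal usage sketch (not part of this patch) of how a
caller such as the VGIC init code might use the new helper to expose the
GIC virtual CPU interface to a guest. The function name, parameter names
and addresses below are purely illustrative:

/*
 * Illustrative only: map one page of the host GIC virtual CPU
 * interface registers at the IPA where the guest expects to find
 * its GIC CPU interface.
 */
static int example_map_vgic_cpu_if(struct kvm *kvm,
				   phys_addr_t host_gicv_base,  /* host physical GICV base */
				   phys_addr_t guest_gicc_ipa)  /* guest IPA of the CPU interface */
{
	return kvm_phys_addr_ioremap(kvm, guest_gicc_ipa,
				     host_gicv_base, PAGE_SIZE);
}

As implemented below, the mapping is created page by page with device
memory attributes and user read/write permissions, while holding
kvm->arch.pgd_mutex.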

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index eb05401..71d6df4 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -36,6 +36,8 @@ void free_hyp_pmds(pgd_t *hyp_pgd);
 
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+			  phys_addr_t pa, unsigned long size);
 
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 5c809b8..36bccaa 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -240,10 +240,15 @@ static void free_guest_pages(pte_t *pte, unsigned long addr)
 	pte_page = virt_to_page(pte);
 
 	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
+		unsigned long pfn;
+
 		if (!pte_present(*pte))
 			goto next_page;
-		page = pfn_to_page(pte_pfn(*pte));
-		put_page(page);
+		pfn = pte_pfn(*pte);
+		if (pfn_valid(pfn)) { /* Skip over device memory */
+			page = pfn_to_page(pfn);
+			put_page(page);
+		}
 		put_page(pte_page);
 next_page:
 		pte++;
@@ -438,6 +443,33 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	return ret;
 }
 
+int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
+			  phys_addr_t pa, unsigned long size)
+{
+	phys_addr_t addr, end;
+	pgprot_t prot;
+	int ret = 0;
+	unsigned long pfn;
+
+	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
+	prot = __pgprot(get_mem_type_prot_pte(MT_DEVICE) | L_PTE_USER | L_PTE2_READ | L_PTE2_WRITE);
+	pfn = __phys_to_pfn(pa);
+
+	mutex_lock(&kvm->arch.pgd_mutex);
+	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
+		pte_t pte = pfn_pte(pfn, prot);
+
+		ret = stage2_set_pte(kvm, addr, &pte);
+		if (ret)
+			break;
+
+		pfn++;
+	}
+	mutex_unlock(&kvm->arch.pgd_mutex);
+
+	return ret;
+}
+
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
  * @vcpu: The VCPU pointer
-- 
1.7.7.1