Moving to the VGIC implies giving access to some I/O devices (the VGIC virtual CPU interface registers) to the guest. Define kvm_phys_addr_ioremap() to map a device at a guest IPA. Signed-off-by: Marc Zyngier <marc.zyngier at arm.com> --- arch/arm/include/asm/kvm_mmu.h | 2 ++ arch/arm/kvm/mmu.c | 44 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 44 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 422ec45..c59ecf9 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -35,6 +35,8 @@ void free_hyp_pmds(void); int kvm_alloc_stage2_pgd(struct kvm *kvm); void kvm_free_stage2_pgd(struct kvm *kvm); +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, + phys_addr_t pa, unsigned long size); int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index c37afd0..ca76be0 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -244,8 +244,12 @@ static void free_guest_pages(pte_t *pte, unsigned long addr) for (i = 0; i < PTRS_PER_PTE; i++) { if (pte_present(*pte)) { - page = pfn_to_page(pte_pfn(*pte)); - put_page(page); + unsigned long pfn = pte_pfn(*pte); + + if (pfn_valid(pfn)) { /* Skip over device memory */ + page = pfn_to_page(pfn); + put_page(page); + } put_page(pte_page); } pte++; @@ -471,6 +475,42 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, return ret; } +/* + * kvm_phys_addr_ioremap -- Map a device range to guest IPA + * + * @kvm: The KVM pointer + * @guest_ipa: The IPA at which to insert the mapping + * @pa: The physical address of the device + * @size: The size of the mapping + */ +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, + phys_addr_t pa, unsigned long size) +{ + phys_addr_t addr, end; + pgprot_t prot; + int ret = 0; + unsigned long pfn; + + end = 
(guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; + prot = __pgprot(get_mem_type_prot_pte(MT_DEVICE) | L_PTE_USER | + L_PTE2_READ | L_PTE2_WRITE); + pfn = __phys_to_pfn(pa); + + spin_lock(&kvm->arch.pgd_lock); + for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { + pte_t pte = pfn_pte(pfn, prot); + + ret = stage2_set_pte(kvm, addr, &pte); + if (ret) + break; + + pfn++; + } + spin_unlock(&kvm->arch.pgd_lock); + + return ret; +} + /** * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation * @vcpu: The VCPU pointer -- 1.7.10.3