When calling stage2_set_pte from kvm_phys_addr_ioremap we pass an argument
indicating that this is an IO mapping and that we expect the address range
to be free; if it is not, an error is returned. This should catch errors
earlier when user space supplies guest physical addresses that overlap.

Signed-off-by: Christoffer Dall <c.dall@xxxxxxxxxxxxxxxxxxxxxx>
---
 arch/arm/kvm/mmu.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 0ab098e..e5ace0e 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -422,7 +422,7 @@ static void stage2_clear_pte(struct kvm *kvm, phys_addr_t addr)
 }
 
 static void stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                           phys_addr_t addr, const pte_t *new_pte)
+                           phys_addr_t addr, const pte_t *new_pte, bool iomap)
 {
         pgd_t *pgd;
         pud_t *pud;
@@ -454,6 +454,9 @@ static void stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
         } else
                 pte = pte_offset_kernel(pmd, addr);
 
+        if (iomap && pte_present(old_pte))
+                return -EFAULT;
+
         /* Create 2nd stage page table mapping - Level 3 */
         old_pte = *pte;
         set_pte_ext(pte, *new_pte, 0);
@@ -489,7 +492,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                 if (ret)
                         goto out;
                 spin_lock(&kvm->mmu_lock);
-                stage2_set_pte(kvm, &cache, addr, &pte);
+                stage2_set_pte(kvm, &cache, addr, &pte, true);
                 spin_unlock(&kvm->mmu_lock);
 
                 pfn++;
@@ -565,7 +568,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 pte_val(new_pte) |= L_PTE_S2_RDWR;
                 kvm_set_pfn_dirty(pfn);
         }
-        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte);
+        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
 
 out_unlock:
         spin_unlock(&vcpu->kvm->mmu_lock);
@@ -716,7 +719,7 @@ static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
 {
         pte_t *pte = (pte_t *)data;
 
-        stage2_set_pte(kvm, NULL, gpa, pte);
+        stage2_set_pte(kvm, NULL, gpa, pte, false);
 }
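
A note on the new check as posted: in the context shown, stage2_set_pte is
declared void, so the added "return -EFAULT;" would not compile, and old_pte
is tested before it is assigned from *pte a few lines further down. Below is a
minimal sketch of how the iomap check could look assuming the function is
changed to return int, the check reads the current PTE directly, and callers
propagate the error; the table walk and TLB maintenance are elided and this is
not necessarily how it was resolved upstream:

/*
 * Sketch only: assumes stage2_set_pte returns int so that -EFAULT can be
 * propagated, and that pmd has been obtained by the existing pgd/pud/pmd
 * walk, which is elided here.
 */
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pmd_t *pmd;     /* set by the elided pgd/pud/pmd walk */
        pte_t *pte;

        /* ... walk/allocate pgd, pud and pmd as in the current code ... */

        pte = pte_offset_kernel(pmd, addr);

        /*
         * IO mappings from kvm_phys_addr_ioremap expect the IPA range to
         * be unused; refuse to overwrite an existing stage-2 mapping
         * instead of silently replacing it.
         */
        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        set_pte_ext(pte, *new_pte, 0);

        /* ... existing TLB maintenance on the old PTE ... */
        return 0;
}

With that change, the call in kvm_phys_addr_ioremap would be wrapped as
ret = stage2_set_pte(kvm, &cache, addr, &pte, true) under the mmu_lock, with
ret checked before continuing the loop, so overlapping guest physical ranges
supplied by user space fail the ioremap instead of clobbering an existing
mapping.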