At the moment, we're killing the i-cache each time we map a page in
stage-2, as we have no idea if this page is used as a data or code
page.

In order to preserve the cache a bit more, mark all pages with the XN
flag. If we get an exec permission fault, invalidate the i-cache and
clear the XN bit. We end up getting more faults, but we preserve the
i-cache.

Signed-off-by: Marc Zyngier <marc.zyngier@xxxxxxx>
---
 arch/arm/include/asm/kvm_mmu.h | 5 +++++
 arch/arm/include/asm/pgtable.h | 2 +-
 arch/arm/kvm/mmu.c             | 7 +++++--
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 257e2bc..dbfeea1 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -95,6 +95,11 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
 	pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pte_exec(pte_t *pte)
+{
+	pte_val(*pte) &= ~L_PTE_XN;
+}
+
 struct kvm;
 
 static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f30ac3b..0bb73cc 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -87,7 +87,7 @@ extern pgprot_t		pgprot_s2_device;
 #define PAGE_KERNEL_EXEC	pgprot_kernel
 #define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
 #define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
 
 #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 58a45d1..d727a4f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -543,8 +543,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (is_error_pfn(pfn))
 		return -EFAULT;
 
-	coherent_icache_guest_page(vcpu->kvm, gfn);
-
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 		goto out_unlock;
@@ -560,6 +558,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		kvm_set_pfn_dirty(pfn);
 	}
 
+	if (fault_type == KVM_EXEC_FAULT) {
+		kvm_set_s2pte_exec(&new_pte);
+		coherent_icache_guest_page(vcpu->kvm, gfn);
+	}
+
 	stage2_set_pte_at(vcpu->kvm, fault_ipa, ptep, new_pte);
 
 out_unlock:
-- 
1.8.1.2

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/cucslists/listinfo/kvmarm
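
[Editor's note, not part of the patch: a minimal sketch of how fault_type could be
derived in user_mem_abort(). kvm_vcpu_trap_is_iabt() is the existing HSR decoding
helper in asm/kvm_emulate.h; KVM_EXEC_FAULT is taken from the hunk above, while
kvm_fault_type() and KVM_DATA_FAULT are hypothetical names used only for illustration.]

	#include <asm/kvm_emulate.h>	/* kvm_vcpu_trap_is_iabt() */

	/* Hypothetical helper: classify a stage-2 fault before mapping the page. */
	static int kvm_fault_type(struct kvm_vcpu *vcpu)
	{
		/*
		 * A stage-2 permission fault taken on an instruction fetch is
		 * reported as an instruction abort (HSR.EC = IABT), which is
		 * enough to tell an exec fault apart from a data access.
		 */
		if (kvm_vcpu_trap_is_iabt(vcpu))
			return KVM_EXEC_FAULT;

		return KVM_DATA_FAULT;	/* assumed name for the non-exec case */
	}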