When write protecting PTEs, if the hardware dirty log is enabled, set
the DBM bit of PTEs that are *already writable*. This ensures that
mechanisms relying on write faults, such as CoW, are not broken.

Signed-off-by: Keqian Zhu <zhukeqian1@xxxxxxxxxx>
Signed-off-by: Peng Liang <liangpeng10@xxxxxxxxxx>
---
 arch/arm64/kvm/mmu.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f08b0fbca0a0..742c7943176f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1536,19 +1536,24 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
 /**
  * stage2_wp_ptes - write protect PMD range
+ * @kvm:	kvm instance for the VM
  * @pmd:	pointer to pmd entry
  * @addr:	range start address
  * @end:	range end address
  */
-static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_ptes(struct kvm *kvm, pmd_t *pmd,
+			   phys_addr_t addr, phys_addr_t end)
 {
 	pte_t *pte;
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			if (!kvm_s2pte_readonly(pte))
-				kvm_set_s2pte_readonly(pte);
+		if (!pte_none(*pte) && !kvm_s2pte_readonly(pte)) {
+#ifdef CONFIG_ARM64_HW_AFDBM
+			if (kvm->arch.hw_dirty_log && !kvm_s2pte_dbm(pte))
+				kvm_set_s2pte_dbm(pte);
+#endif
+			kvm_set_s2pte_readonly(pte);
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1575,7 +1580,7 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
 			if (!kvm_s2pmd_readonly(pmd))
 				kvm_set_s2pmd_readonly(pmd);
 		} else {
-			stage2_wp_ptes(pmd, addr, next);
+			stage2_wp_ptes(kvm, pmd, addr, next);
 		}
 	}
 	} while (pmd++, addr = next, addr != end);
-- 
2.19.1
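
For context, the small user-space sketch below models the decision the
first hunk implements: DBM is set only on entries that were already
writable before the write-protect pass, so entries that were never
writable (e.g. CoW mappings) still take a write fault. The struct, bit
positions and helper names are illustrative stand-ins, not the kernel's
real stage-2 descriptor definitions.

	/* Simplified user-space model of the DBM decision in stage2_wp_ptes().
	 * The bit layout and helpers are illustrative only; the real stage-2
	 * descriptor bits and accessors live under arch/arm64/.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define S2_WRITABLE	(1u << 0)	/* models S2AP write permission */
	#define S2_DBM		(1u << 1)	/* models the Dirty Bit Modifier */

	struct spte { uint32_t bits; };		/* stand-in for a stage-2 PTE */

	static bool spte_readonly(const struct spte *p)
	{
		return !(p->bits & S2_WRITABLE);
	}

	/* Mirrors the patched logic: only already-writable entries get DBM,
	 * so hardware can restore write access for dirty logging, while
	 * read-only (e.g. CoW) entries keep faulting on writes. */
	static void wp_one(struct spte *p, bool hw_dirty_log)
	{
		if (spte_readonly(p))
			return;			/* never writable: still faults */
		if (hw_dirty_log)
			p->bits |= S2_DBM;	/* hardware marks it dirty later */
		p->bits &= ~S2_WRITABLE;	/* write-protect the entry */
	}

	int main(void)
	{
		struct spte writable = { .bits = S2_WRITABLE };
		struct spte cow      = { .bits = 0 };

		wp_one(&writable, true);
		wp_one(&cow, true);

		printf("writable entry: DBM=%d\n", !!(writable.bits & S2_DBM)); /* 1 */
		printf("CoW entry:      DBM=%d\n", !!(cow.bits & S2_DBM));      /* 0 */
		return 0;
	}

Running the sketch prints DBM=1 for the writable entry and DBM=0 for
the CoW one, which is the distinction that keeps write-fault-based
mechanisms working while hardware dirty logging is enabled.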