No need to set a page dirty if it has never been mapped writable. This
is a tiny optimization, but a good one to have for people who care
about dirty page tracking.

Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 70e95d0..becee33 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -557,9 +557,6 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	    !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
-		return ret;
-
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
 	 * to guarantee consistency between TLB and page tables.
@@ -570,7 +567,8 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 
-	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
+	if (shadow_dirty_mask ?
+	    spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask) : ret)
 		kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 
 	return ret;
@@ -605,7 +603,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? : PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 	return 1;
 }
-- 
2.8.0.rc3.226.g39d4020
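
P.S. for reviewers: below is a minimal userspace sketch of the new
dirty-tracking decision in the last hunk, in case the GNU "x ? : y"
extension (shorthand for "x ? x : y") is unfamiliar. The mask values
and the should_set_pfn_dirty() helper are made-up stand-ins, not real
kernel symbols; the real code uses shadow_dirty_mask and
PT_WRITABLE_MASK. With hardware accessed/dirty bits available
(non-zero mask) the hardware dirty bit decides; without them (mask ==
0, e.g. EPT without A/D bit support) the fallback is whether the SPTE
was mapped writable, rather than marking the pfn dirty
unconditionally.

	/*
	 * Standalone sketch (compile with gcc; uses the GNU ?:
	 * extension). Mask values are hypothetical stand-ins.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define FAKE_WRITABLE_MASK (1ULL << 1)	/* stand-in for PT_WRITABLE_MASK */
	#define FAKE_DIRTY_MASK    (1ULL << 9)	/* stand-in for shadow_dirty_mask */

	/*
	 * dirty_mask != 0: trust the hardware dirty bit.
	 * dirty_mask == 0: fall back to "was the SPTE writable?".
	 */
	static int should_set_pfn_dirty(uint64_t old_spte, uint64_t dirty_mask)
	{
		return !!(old_spte & (dirty_mask ? : FAKE_WRITABLE_MASK));
	}

	int main(void)
	{
		/* No hardware dirty bit: read-only SPTE never dirties the pfn */
		printf("%d\n", should_set_pfn_dirty(0, 0));			/* 0 */
		printf("%d\n", should_set_pfn_dirty(FAKE_WRITABLE_MASK, 0));	/* 1 */
		/* Hardware dirty bit present: only the dirty bit matters */
		printf("%d\n", should_set_pfn_dirty(FAKE_WRITABLE_MASK,
						    FAKE_DIRTY_MASK));		/* 0 */
		printf("%d\n", should_set_pfn_dirty(FAKE_DIRTY_MASK,
						    FAKE_DIRTY_MASK));		/* 1 */
		return 0;
	}

The mmu_spte_update() hunk follows the same idea, except that when
shadow_dirty_mask is zero its fallback is ret, i.e. the SPTE was
writable and is now being made non-writable.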