Introduce the spte_has_volatile_bits() function to judge whether spte bits may be lost; this is more readable and will help us clean up the code later.

Signed-off-by: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c |   20 +++++++++++++++++---
 1 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f7b379a..e18834c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -303,6 +303,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static bool spte_has_volatile_bits(u64 spte)
+{
+	if (!shadow_accessed_mask)
+		return false;
+
+	if (!is_shadow_present_pte(spte))
+		return false;
+
+	if (spte & shadow_accessed_mask)
+		return false;
+
+	return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
 	u64 old_spte;
@@ -683,14 +697,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 	pfn_t pfn;
 	u64 old_spte = *sptep;
 
-	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-	      old_spte & shadow_accessed_mask) {
+	if (!spte_has_volatile_bits(old_spte))
 		__set_spte(sptep, new_spte);
-	} else
+	else
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
 		return;
+
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-- 
1.6.1.2
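
For readers unfamiliar with the accessed-bit race this helper is about: below is a
minimal standalone userspace sketch (not kernel code) of the same predicate and of
the decision made in set_spte_track_bits(). The names with a _DEMO suffix and
spte_has_volatile_bits_demo() are invented for this illustration; in KVM the real
checks are shadow_accessed_mask and is_shadow_present_pte(), and the real helper
additionally returns false when shadow_accessed_mask is zero (accessed tracking is
emulated in software, so there is no hardware-set bit to lose).

/*
 * Standalone sketch: an spte is "volatile" when hardware may still set the
 * accessed bit behind our back, i.e. the entry is present and the accessed
 * bit has not been set yet.  In that case a plain store could lose the bit,
 * so the kernel code falls back to an atomic exchange of the spte.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK_DEMO   (1ull << 0)   /* stand-in for the present check */
#define PT_ACCESSED_MASK_DEMO  (1ull << 5)   /* stand-in for shadow_accessed_mask */

static bool spte_has_volatile_bits_demo(uint64_t spte)
{
	if (!(spte & PT_PRESENT_MASK_DEMO))
		return false;

	if (spte & PT_ACCESSED_MASK_DEMO)
		return false;

	return true;
}

int main(void)
{
	uint64_t present_not_accessed = PT_PRESENT_MASK_DEMO;
	uint64_t present_and_accessed = PT_PRESENT_MASK_DEMO | PT_ACCESSED_MASK_DEMO;

	/* Volatile: hardware may still set the accessed bit, so the real code
	 * uses __xchg_spte() to capture it atomically. */
	printf("present, !accessed -> volatile: %d\n",
	       spte_has_volatile_bits_demo(present_not_accessed));

	/* Not volatile: nothing left for hardware to set, so a plain
	 * __set_spte()-style store is sufficient. */
	printf("present,  accessed -> volatile: %d\n",
	       spte_has_volatile_bits_demo(present_and_accessed));

	return 0;
}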