On Thu, Aug 1, 2024 at 11:36 AM Sean Christopherson <seanjc@xxxxxxxxxx> wrote:
> --- a/arch/x86/kvm/mmu/spte.c
> +++ b/arch/x86/kvm/mmu/spte.c
> @@ -181,7 +181,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>
>         spte |= shadow_present_mask;
>         if (!prefetch)
> -               spte |= spte_shadow_accessed_mask(spte);
> +               spte |= shadow_accessed_mask;
>
>         /*
>          * For simplicity, enforce the NX huge page mitigation even if not
> @@ -258,7 +258,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
>         }
>
>         if (pte_access & ACC_WRITE_MASK)
> -               spte |= spte_shadow_dirty_mask(spte);
> +               spte |= shadow_accessed_mask;

This second hunk looks like a copy-paste mistake — it should set the dirty
mask, not the accessed mask:

                spte |= shadow_dirty_mask;