On Fri, Jan 10, 2020 at 12:37:33PM +0100, Vitaly Kuznetsov wrote:
> Sean Christopherson <sean.j.christopherson@xxxxxxxxx> writes:
> > --- a/arch/x86/kvm/mmu/paging_tmpl.h
> > +++ b/arch/x86/kvm/mmu/paging_tmpl.h
> > @@ -128,6 +128,21 @@ static inline int FNAME(is_present_gpte)(unsigned long pte)
> >  #endif
> >  }
> >  
> > +static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
> > +{
> > +#if PTTYPE != PTTYPE_EPT
> > +	return false;
> > +#else
> > +	return __is_bad_mt_xwr(rsvd_check, gpte);
> > +#endif
> > +}
> > +
> > +static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> > +{
> > +	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
> > +	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
> > +}
> > +
> 
> Not sure if it would make sense/difference (well, this is famous KVM
> MMU!) but as FNAME(is_bad_mt_xwr) has only one call site we could've
> as well merged it, something like:
> 
> static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
> {
> #if PTTYPE == PTTYPE_EPT
> 	bool res = __is_bad_mt_xwr(&mmu->guest_rsvd_check, gpte);
> #else
> 	bool res = false;
> #endif
> 	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) || res;
> }
> 
> but keeping it in-line with __is_rsvd_bits_set()/__is_bad_mt_xwr() in
> mmu.c likely has greater benefits.

Ya, I don't love the code, but it was the least awful of the options I
tried, in that it's the most readable without being too obnoxious.

Similar to your suggestion, but it avoids evaluating __is_bad_mt_xwr()
if reserved bits are set, which is admittedly rare:

	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level)
#if PTTYPE == PTTYPE_EPT
	       || __is_bad_mt_xwr(&mmu->guest_rsvd_check, gpte)
#endif
		;

Or macrofying the call to keep the call site clean, but IMO this
obfuscates things too much:

	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       IS_BAD_MT_XWR(&mmu->guest_rsvd_check, gpte);
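
For completeness, the macro itself would be something along these lines
(rough sketch only; IS_BAD_MT_XWR is a hypothetical name, nothing like it
exists in the tree):

	#if PTTYPE == PTTYPE_EPT
	/* EPT template: defer to the real memtype/XWR reserved bit check. */
	#define IS_BAD_MT_XWR(rsvd_check, gpte)	__is_bad_mt_xwr(rsvd_check, gpte)
	#else
	/* Non-EPT templates have no MT/XWR bits to validate. */
	#define IS_BAD_MT_XWR(rsvd_check, gpte)	false
	#endif

i.e. the #ifdeffery just moves out of the call site and into the macro
definition, with the same short-circuit behavior as the open-coded version.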