On 25/09/20 23:22, Ben Gardon wrote:
>
> -static bool is_nx_huge_page_enabled(void)
> +bool is_nx_huge_page_enabled(void)
>  {
>  	return READ_ONCE(nx_huge_pages);
>  }
> @@ -381,7 +361,7 @@ static inline u64 spte_shadow_dirty_mask(u64 spte)
>  	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
>  }
>
> -static inline bool is_access_track_spte(u64 spte)
> +inline bool is_access_track_spte(u64 spte)
>  {
>  	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
>  }
> @@ -433,7 +413,7 @@ static u64 get_mmio_spte_generation(u64 spte)
>  	return gen;
>  }
>
> -static u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
> +u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
>  {
>
>  	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
> @@ -613,7 +593,7 @@ int is_shadow_present_pte(u64 pte)
>  	return (pte != 0) && !is_mmio_spte(pte);
>  }
>
> -static int is_large_pte(u64 pte)
> +int is_large_pte(u64 pte)
>  {
>  	return pte & PT_PAGE_SIZE_MASK;
>  }

All candidates for inlining too.  (Also probably we'll create a common.c
file for stuff that is common to the shadow and TDP MMU, but that can
come later.)

Paolo
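
[Editor's sketch] To make "candidates for inlining" concrete: instead of
dropping static and exporting the symbols, the one-line helpers could move
as static inline into a header shared by the shadow MMU and the TDP MMU.
The mmu_internal.h name, the include guard, and the forward declarations
below are placeholders for illustration, not part of the quoted patch.

/* arch/x86/kvm/mmu/mmu_internal.h -- hypothetical shared header */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/kvm_host.h>

/*
 * Assumed to be made visible here as well, either by moving them into
 * this header too or by declaring them; today they live in mmu.c and
 * the existing mmu headers.
 */
extern u64 shadow_acc_track_mask;
bool spte_ad_enabled(u64 spte);

/*
 * Keeping these static inline in a shared header lets both mmu.c and
 * tdp_mmu.c use them without paying an out-of-line call, which is what
 * removing "static" alone would cost.
 */
static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

static inline int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;	/* from the existing mmu headers */
}

#endif /* __KVM_X86_MMU_INTERNAL_H */

Presumably the common.c mentioned above would then hold the larger shared
routines such as make_mmio_spte(), rather than these one-liners.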