On Mon, Mar 11, 2024 at 10:24:50AM -0700, Sean Christopherson <seanjc@xxxxxxxxxx> wrote:
> On Fri, Mar 01, 2024, isaku.yamahata@xxxxxxxxx wrote:
> > From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> >
> > Another function will initialize struct kvm_page_fault.  Add initializer
> > macro to unify the big struct initialization.
> >
> > No functional change intended.
> >
> > Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
> > ---
> >  arch/x86/kvm/mmu/mmu_internal.h | 44 +++++++++++++++++++--------------
> >  1 file changed, 26 insertions(+), 18 deletions(-)
> >
> > diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> > index 0669a8a668ca..72ef09fc9322 100644
> > --- a/arch/x86/kvm/mmu/mmu_internal.h
> > +++ b/arch/x86/kvm/mmu/mmu_internal.h
> > @@ -279,27 +279,35 @@ enum {
> >  	RET_PF_SPURIOUS,
> >  };
> >
> > +#define KVM_PAGE_FAULT_INIT(_vcpu, _cr2_or_gpa, _err, _prefetch, _max_level) {	\
> > +	.addr = (_cr2_or_gpa),						\
> > +	.error_code = (_err),						\
> > +	.exec = (_err) & PFERR_FETCH_MASK,				\
> > +	.write = (_err) & PFERR_WRITE_MASK,				\
> > +	.present = (_err) & PFERR_PRESENT_MASK,				\
> > +	.rsvd = (_err) & PFERR_RSVD_MASK,				\
> > +	.user = (_err) & PFERR_USER_MASK,				\
> > +	.prefetch = (_prefetch),					\
> > +	.is_tdp =							\
> > +		likely((_vcpu)->arch.mmu->page_fault == kvm_tdp_page_fault),	\
> > +	.nx_huge_page_workaround_enabled =				\
> > +		is_nx_huge_page_enabled((_vcpu)->kvm),			\
> > +									\
> > +	.max_level = (_max_level),					\
> > +	.req_level = PG_LEVEL_4K,					\
> > +	.goal_level = PG_LEVEL_4K,					\
> > +	.is_private =							\
> > +		kvm_mem_is_private((_vcpu)->kvm, (_cr2_or_gpa) >> PAGE_SHIFT),	\
> > +									\
> > +	.pfn = KVM_PFN_ERR_FAULT,					\
> > +	.hva = KVM_HVA_ERR_BAD, }
> > +
>
> Oof, no.  I would much rather refactor kvm_mmu_do_page_fault() as needed than
> have to maintain a macro like this.

Ok, I updated it as follows.

diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 0669a8a668ca..e57cc3c56a6d 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -279,8 +279,8 @@ enum {
 	RET_PF_SPURIOUS,
 };
 
-static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefetch, int *emulation_type)
+static inline int __kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					  u32 err, bool prefetch, int *emulation_type)
 {
 	struct kvm_page_fault fault = {
 		.addr = cr2_or_gpa,
@@ -307,6 +307,21 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
 	}
 
+	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
+		r = kvm_tdp_page_fault(vcpu, &fault);
+	else
+		r = vcpu->arch.mmu->page_fault(vcpu, &fault);
+
+	if (fault.write_fault_to_shadow_pgtable && emulation_type)
+		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
+	return r;
+}
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					u32 err, bool prefetch, int *emulation_type)
+{
+	int r;
+
 	/*
 	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
 	 * guest perspective and have already been counted at the time of the
@@ -315,13 +330,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	if (!prefetch)
 		vcpu->stat.pf_taken++;
 
-	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
-		r = kvm_tdp_page_fault(vcpu, &fault);
-	else
-		r = vcpu->arch.mmu->page_fault(vcpu, &fault);
-
-	if (fault.write_fault_to_shadow_pgtable && emulation_type)
-		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;
+	r = __kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, err, prefetch, emulation_type);
 
 	/*
 	 * Similar to above, prefetch faults aren't truly spurious, and the
-- 
2.43.2
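
For reference, the point of the split is that a caller outside of
kvm_mmu_do_page_fault() can drive the fault path without touching the
guest-visible fault stats.  A purely hypothetical caller (the name
kvm_mmu_premap_gpa, the prefetch choice, and the retry policy are made up
here just to illustrate the intended usage; it is not part of this patch)
could look roughly like:

	/*
	 * Illustration only: resolve a GPA through the normal fault path,
	 * retrying until the mapping is installed.  Passing a NULL
	 * emulation_type is safe because __kvm_mmu_do_page_fault() checks
	 * it before setting EMULTYPE_WRITE_PF_TO_SP.
	 */
	static int kvm_mmu_premap_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 err)
	{
		int r;

		do {
			/* 'true' for prefetch is an arbitrary choice for this sketch. */
			r = __kvm_mmu_do_page_fault(vcpu, gpa, err, true, NULL);
		} while (r == RET_PF_RETRY);

		return r;
	}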

-- 
Isaku Yamahata <isaku.yamahata@xxxxxxxxx>