On Fri, Dec 09, 2022 at 12:45:51PM +0800, Robert Hoo wrote:
> get_cr3() is the implementation of kvm_mmu::get_guest_pgd(), but CR3 is not
> naturally equivalent to the PGD: the SDM says the high bits of CR3 are
> reserved and must be zero, and with the introduction of the LAM feature,
> bits 61 ~ 62 are now used as well.
> So, rename get_cr3() --> get_pgd() to better indicate the function's purpose,
> and filter out the CR3 high bits in it.
>
> Signed-off-by: Robert Hoo <robert.hu@xxxxxxxxxxxxxxx>
> Reviewed-by: Jingqi Liu <jingqi.liu@xxxxxxxxx>
> ---
>  arch/x86/include/asm/processor-flags.h |  1 +
>  arch/x86/kvm/mmu/mmu.c                 | 12 ++++++++----
>  2 files changed, 9 insertions(+), 4 deletions(-)
>
> diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
> index d8cccadc83a6..bb0f8dd16956 100644
> --- a/arch/x86/include/asm/processor-flags.h
> +++ b/arch/x86/include/asm/processor-flags.h
> @@ -38,6 +38,7 @@
>  #ifdef CONFIG_X86_64
>  /* Mask off the address space ID and SME encryption bits. */
>  #define CR3_ADDR_MASK	__sme_clr(PHYSICAL_PAGE_MASK)
> +#define CR3_HIGH_RSVD_MASK	GENMASK_ULL(63, 52)
>  #define CR3_PCID_MASK	0xFFFull
>  #define CR3_NOFLUSH	BIT_ULL(63)
>
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index b6f96d47e596..d433c8923b18 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -4488,9 +4488,13 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
>
> -static unsigned long get_cr3(struct kvm_vcpu *vcpu)
> +static unsigned long get_pgd(struct kvm_vcpu *vcpu)
>  {
> +#ifdef CONFIG_X86_64
> +	return kvm_read_cr3(vcpu) & ~CR3_HIGH_RSVD_MASK;

CR3_HIGH_RSVD_MASK is used here to extract the guest PGD; it may need to be
derived from the guest's MAXPHYADDR rather than hard-coded to 52. Or, more
simply, just mask out the LAM bits.

> +#else
>  	return kvm_read_cr3(vcpu);
> +#endif
>  }
>
>  static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
> @@ -5043,7 +5047,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
>  	context->page_fault = kvm_tdp_page_fault;
>  	context->sync_page = nonpaging_sync_page;
>  	context->invlpg = NULL;
> -	context->get_guest_pgd = get_cr3;
> +	context->get_guest_pgd = get_pgd;
>  	context->get_pdptr = kvm_pdptr_read;
>  	context->inject_page_fault = kvm_inject_page_fault;
>
> @@ -5193,7 +5197,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
>
>  	kvm_init_shadow_mmu(vcpu, cpu_role);
>
> -	context->get_guest_pgd = get_cr3;
> +	context->get_guest_pgd = get_pgd;
>  	context->get_pdptr = kvm_pdptr_read;
>  	context->inject_page_fault = kvm_inject_page_fault;
>  }
> @@ -5207,7 +5211,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
>  		return;
>
>  	g_context->cpu_role.as_u64 = new_mode.as_u64;
> -	g_context->get_guest_pgd = get_cr3;
> +	g_context->get_guest_pgd = get_pgd;
>  	g_context->get_pdptr = kvm_pdptr_read;
>  	g_context->inject_page_fault = kvm_inject_page_fault;
>
> --
> 2.31.1
>
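
For illustration, a minimal sketch of the "just mask out the LAM bits"
alternative mentioned above (not the patch as posted). The define names are
placeholders for this sketch only; the bit positions follow the LAM spec
(CR3 bit 61 = LAM_U57, bit 62 = LAM_U48):

/* Illustrative placeholders: CR3 LAM control bits per the LAM spec. */
#define CR3_LAM_U57	BIT_ULL(61)	/* LAM57 for user pointers */
#define CR3_LAM_U48	BIT_ULL(62)	/* LAM48 for user pointers */

static unsigned long get_pgd(struct kvm_vcpu *vcpu)
{
	/*
	 * Strip only the LAM control bits instead of a hard-coded
	 * GENMASK_ULL(63, 52); the rest of CR3 is left untouched.
	 */
	return kvm_read_cr3(vcpu) & ~(CR3_LAM_U57 | CR3_LAM_U48);
}

This sidesteps the hard-coded 52, at the cost of assuming the CR3 set path
has already rejected any other reserved high bits.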