On Thu, Apr 28, 2022, Manali Shukla wrote:
> The current implementation of the nested page table builds the page
> table statically, with 2048 PTEs and one PML4 entry. That is why the
> current implementation is not extensible.
>
> The new implementation builds the page table dynamically, based on
> the RAM size of the VM, which enables us to have a separate memory
> range to test various NPT test cases.

I'm guessing you know the drill :-)

> Signed-off-by: Manali Shukla <manali.shukla@xxxxxxx>
> ---
>  x86/svm.c     | 75 ++++++++++++++++-----------------
>  x86/svm.h     |  4 ++-
>  x86/svm_npt.c |  5 ++--
>  3 files changed, 29 insertions(+), 55 deletions(-)
>
> diff --git a/x86/svm.c b/x86/svm.c
> index ec825c7..e66c801 100644
> --- a/x86/svm.c
> +++ b/x86/svm.c
> @@ -8,6 +8,7 @@
>  #include "desc.h"
>  #include "msr.h"
>  #include "vm.h"
> +#include "fwcfg.h"
>  #include "smp.h"
>  #include "types.h"
>  #include "alloc_page.h"
> @@ -16,43 +17,32 @@
>  #include "vmalloc.h"
>
>  /* for the nested page table*/
> -u64 *pte[2048];
> -u64 *pde[4];
> -u64 *pdpe;
>  u64 *pml4e;
>
>  struct vmcb *vmcb;
>
>  u64 *npt_get_pte(u64 address)
>  {
> -	int i1, i2;
> -
> -	address >>= 12;
> -	i1 = (address >> 9) & 0x7ff;
> -	i2 = address & 0x1ff;
> -
> -	return &pte[i1][i2];
> +	return get_pte(npt_get_pml4e(), (void*)address);
>  }
>
>  u64 *npt_get_pde(u64 address)
>  {
> -	int i1, i2;
> -
> -	address >>= 21;
> -	i1 = (address >> 9) & 0x3;
> -	i2 = address & 0x1ff;
> -
> -	return &pde[i1][i2];
> +	struct pte_search search;
> +	search = find_pte_level(npt_get_pml4e(), (void*)address, 2);
> +	return search.pte;
>  }
>
> -u64 *npt_get_pdpe(void)
> +u64 *npt_get_pdpe(u64 address)
>  {
> -	return pdpe;
> +	struct pte_search search;
> +	search = find_pte_level(npt_get_pml4e(), (void*)address, 3);
> +	return search.pte;
>  }
>
>  u64 *npt_get_pml4e(void)
>  {
> -	return pml4e;
> +	return pml4e;
>  }
>
>  bool smp_supported(void)
> @@ -300,11 +290,21 @@ static void set_additional_vcpu_msr(void *msr_efer)
>  	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
>  }
>
> +void setup_npt(void) {

Function braces go on a new line.

> +	u64 end_of_memory;
> +	pml4e = alloc_page();
> +	...

> diff --git a/x86/svm_npt.c b/x86/svm_npt.c
> index 53e8a90..ab4dcf4 100644
> --- a/x86/svm_npt.c
> +++ b/x86/svm_npt.c
> @@ -209,7 +209,8 @@ static void __svm_npt_rsvd_bits_test(u64 * pxe, u64 rsvd_bits, u64 efer,
>  		       "Wanted #NPF on rsvd bits = 0x%lx, got exit = 0x%x", rsvd_bits,
>  		       exit_reason);
>
> -	if (pxe == npt_get_pdpe() || pxe == npt_get_pml4e()) {
> +	if (pxe == npt_get_pdpe((u64) basic_guest_main)
> +	    || pxe == npt_get_pml4e()) {

The "||" should be on the previous line.

>  		/*
>  		 * The guest's page tables will blow up on a bad PDPE/PML4E,
>  		 * before starting the final walk of the guest page.
> @@ -338,7 +339,7 @@ skip_pte_test:
>  				get_random_bits(20, 13) | PT_PAGE_SIZE_MASK,
>  				host_efer, host_cr4, guest_efer, guest_cr4);
>
> -	_svm_npt_rsvd_bits_test(npt_get_pdpe(),
> +	_svm_npt_rsvd_bits_test(npt_get_pdpe((u64) basic_guest_main),
>  				PT_PAGE_SIZE_MASK |
>  				(this_cpu_has(X86_FEATURE_GBPAGES) ?
>  				 get_random_bits(29, 13) : 0), host_efer,
> --
> 2.30.2
>
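
For reference, a minimal sketch of how those two nits look once fixed; this is
only an illustration, not the actual patch. The setup_npt() body is trimmed in
the quote above, so the fw_cfg lookup is my assumption (suggested by the new
"fwcfg.h" include), and the actual NPT build-up is left as a placeholder:

	void setup_npt(void)
	{
		u64 end_of_memory;

		pml4e = alloc_page();

		/* Assumption: size the NPT from guest RAM via fw_cfg. */
		end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);

		/* ... build the nested page table up to end_of_memory ... */
	}

and in __svm_npt_rsvd_bits_test():

	if (pxe == npt_get_pdpe((u64) basic_guest_main) ||
	    pxe == npt_get_pml4e()) {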