Rather than assuming which PTEs the SMEP test runs on, look them up to
ensure they are correct. If this test were to run on a different page
table (i.e., run in an L2 test), the wrong PTEs would be set. Switch
to looking up the PTEs to prevent this from happening.

Signed-off-by: Aaron Lewis <aaronlewis@xxxxxxxxxx>
---
 lib/libcflat.h |  1 +
 lib/x86/vm.c   | 19 +++++++++++++++++++
 lib/x86/vm.h   |  3 +++
 x86/access.c   | 23 +++++++++++++++--------
 x86/cstart64.S |  1 -
 x86/flat.lds   |  1 +
 6 files changed, 39 insertions(+), 9 deletions(-)

diff --git a/lib/libcflat.h b/lib/libcflat.h
index 9bb7e08..c1fd31f 100644
--- a/lib/libcflat.h
+++ b/lib/libcflat.h
@@ -35,6 +35,7 @@
 #define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
 #define __ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a) - 1)
 #define ALIGN(x, a)		__ALIGN((x), (a))
+#define ALIGN_DOWN(x, a)	__ALIGN((x) - ((a) - 1), (a))
 #define IS_ALIGNED(x, a)	(((x) & ((typeof(x))(a) - 1)) == 0)
 
 #define MIN(a, b)		((a) < (b) ? (a) : (b))
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index 5cd2ee4..cbecddc 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -281,3 +281,22 @@ void force_4k_page(void *addr)
 	if (pte & PT_PAGE_SIZE_MASK)
 		split_large_page(ptep, 2);
 }
+
+/*
+ * Call the callback, function, on each page from va_start to va_end.
+ */
+void walk_pte(void *va_start, void *va_end,
+	      void (*function)(pteval_t *pte, void *va)){
+	struct pte_search search;
+	uintptr_t curr_addr;
+	u64 page_size;
+
+	for (curr_addr = (uintptr_t)va_start; curr_addr < (uintptr_t)va_end;
+	     curr_addr = ALIGN_DOWN(curr_addr + page_size, page_size)) {
+		search = find_pte_level(current_page_table(), (void *)curr_addr, 1);
+		assert(found_leaf_pte(search));
+		page_size = 1ul << PGDIR_BITS(search.level);
+
+		function(search.pte, (void *)curr_addr);
+	}
+}
diff --git a/lib/x86/vm.h b/lib/x86/vm.h
index d9753c3..f4ac2c5 100644
--- a/lib/x86/vm.h
+++ b/lib/x86/vm.h
@@ -52,4 +52,7 @@ struct vm_vcpu_info {
 	u64 cr0;
 };
 
+void walk_pte(void *va_start, void *va_end,
+	      void (*function)(pteval_t *pte, void *va));
+
 #endif
diff --git a/x86/access.c b/x86/access.c
index 4725bbd..17a6ef9 100644
--- a/x86/access.c
+++ b/x86/access.c
@@ -201,10 +201,21 @@ static void set_cr0_wp(int wp)
 	}
 }
 
+static void clear_user_mask(pteval_t *pte, void *va) {
+	*pte &= ~PT_USER_MASK;
+}
+
+static void set_user_mask(pteval_t *pte, void *va) {
+	*pte |= PT_USER_MASK;
+
+	/* Flush to avoid spurious #PF */
+	invlpg(va);
+}
+
 static unsigned set_cr4_smep(int smep)
 {
+	extern char stext, etext;
 	unsigned long cr4 = shadow_cr4;
-	extern u64 ptl2[];
 	unsigned r;
 
 	cr4 &= ~CR4_SMEP_MASK;
@@ -214,14 +225,10 @@ static unsigned set_cr4_smep(int smep)
 		return 0;
 
 	if (smep)
-		ptl2[2] &= ~PT_USER_MASK;
+		walk_pte(&stext, &etext, clear_user_mask);
 	r = write_cr4_checking(cr4);
-	if (r || !smep) {
-		ptl2[2] |= PT_USER_MASK;
-
-		/* Flush to avoid spurious #PF */
-		invlpg((void *)(2 << 21));
-	}
+	if (r || !smep)
+		walk_pte(&stext, &etext, set_user_mask);
 	if (!r)
 		shadow_cr4 = cr4;
 	return r;
diff --git a/x86/cstart64.S b/x86/cstart64.S
index 5c6ad38..4ba9943 100644
--- a/x86/cstart64.S
+++ b/x86/cstart64.S
@@ -26,7 +26,6 @@ ring0stacktop:
 .data
 
 .align 4096
-.globl ptl2
 ptl2:
 i = 0
 .rept 512 * 4
diff --git a/x86/flat.lds b/x86/flat.lds
index a278b56..337bc44 100644
--- a/x86/flat.lds
+++ b/x86/flat.lds
@@ -3,6 +3,7 @@ SECTIONS
 	. = 4M + SIZEOF_HEADERS;
 	stext = .;
 	.text : { *(.init) *(.text) *(.text.*) }
+	etext = .;
 	. = ALIGN(4K);
 	.data : {
 		*(.data)
-- 
2.34.0.rc0.344.g81b53c2807-goog
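
Editor's note (not part of the patch): a minimal, standalone sketch of the
arithmetic the new walk_pte() relies on to step between mapped pages. The
point is that ALIGN_DOWN(curr_addr + page_size, page_size) lands on the
start of the page following curr_addr even when curr_addr (e.g. &stext,
which flat.lds places at 4M + SIZEOF_HEADERS) is not page aligned. The
macro bodies are copied from lib/libcflat.h as modified above; the hosted
main()/printf harness and the example address 0x4002d0 are illustrative
only.

#include <stdio.h>
#include <stdint.h>

/* Copied from lib/libcflat.h, including the ALIGN_DOWN addition. */
#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a)		__ALIGN((x), (a))
#define ALIGN_DOWN(x, a)	__ALIGN((x) - ((a) - 1), (a))

int main(void)
{
	/* An unaligned virtual address, roughly where stext might land. */
	uint64_t va = 0x4002d0;
	uint64_t page_4k = 1ull << 12;
	uint64_t page_2m = 1ull << 21;

	/* ALIGN_DOWN(x, a) clears the low bits: prints 0x400000. */
	printf("4K-aligned down: 0x%llx\n",
	       (unsigned long long)ALIGN_DOWN(va, page_4k));

	/*
	 * How walk_pte() advances: the next iteration starts at the first
	 * address of the following page, 0x401000 for a 4K mapping and
	 * 0x600000 for a 2M mapping, wherever va falls within the page.
	 */
	printf("next 4K page:    0x%llx\n",
	       (unsigned long long)ALIGN_DOWN(va + page_4k, page_4k));
	printf("next 2M page:    0x%llx\n",
	       (unsigned long long)ALIGN_DOWN(va + page_2m, page_2m));

	return 0;
}

Because the advance is recomputed from the leaf level found on each
iteration, the same loop handles ranges that mix 4K and large pages.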