As mentioned in the GHCB spec (Section 2.3.1, GHCB MSR protocol), the page state change GHCB MSR protocol is used to convert a 4K page from private to shared or vice-versa. Add support for this test by allocating a total of 1024 4K pages to ensure the test can handle large cases. The purpose of this test is to determine whether the hypervisor changes the page state to shared when using the MSR protocol. Before the conversion test, ensure the pages are in the expected state (i.e., private) by issuing a re-validation on one of the newly allocated pages to confirm that the expected page state matches the page's current state. Report a failure if the expected page state is not private. After the hypervisor converts the page state to shared, ensure the pages are in the shared state by writing data to them while the C-bit in their PTEs is not set. Report a failure otherwise. Provide support for cleaning up the physical pages by converting them back to the default guest-owned state before freeing them. Import GHCB MSR PSC related and pvalidate definitions from upstream Linux.
(arch/x86/include/asm/sev-common.h and arch/x86/include/asm/sev.h)

Signed-off-by: Pavan Kumar Paluri <papaluri@xxxxxxx>
---
 lib/asm-generic/page.h |   2 +
 lib/x86/amd_sev.c      | 158 +++++++++++++++++++++++++++++++++++++++++
 lib/x86/amd_sev.h      |  69 ++++++++++++++++++
 lib/x86/vm.h           |   3 +
 x86/amd_sev.c          |  88 +++++++++++++++++++++++
 5 files changed, 320 insertions(+)

diff --git a/lib/asm-generic/page.h b/lib/asm-generic/page.h
index 5ed086129657..ed9be58e31d0 100644
--- a/lib/asm-generic/page.h
+++ b/lib/asm-generic/page.h
@@ -14,6 +14,8 @@
 #define PAGE_SHIFT 12
 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
+#define LARGE_PAGE_SIZE (512 * PAGE_SIZE)
+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE - 1))
 
 #ifndef __ASSEMBLY__
 
diff --git a/lib/x86/amd_sev.c b/lib/x86/amd_sev.c
index f84230eba2a4..5cbdeb35bba8 100644
--- a/lib/x86/amd_sev.c
+++ b/lib/x86/amd_sev.c
@@ -12,6 +12,8 @@
 #include "amd_sev.h"
 #include "x86/processor.h"
 #include "x86/vm.h"
+#include "vmalloc.h"
+#include "alloc_page.h"
 
 static unsigned short amd_sev_c_bit_pos;
 phys_addr_t ghcb_addr;
@@ -188,3 +190,159 @@ unsigned long long get_amd_sev_addr_upperbound(void)
 		return PT_ADDR_UPPER_BOUND_DEFAULT;
 	}
 }
+
+void set_pte_decrypted(unsigned long vaddr, int npages)
+{
+	pteval_t *pte;
+	unsigned long vaddr_end = vaddr + (npages * PAGE_SIZE);
+
+	while (vaddr < vaddr_end) {
+		pte = get_pte((pgd_t *)read_cr3(), (void *)vaddr);
+
+		if (!pte)
+			assert_msg(pte, "No pte found for vaddr 0x%lx", vaddr);
+
+		/* unset C-bit */
+		*pte &= ~get_amd_sev_c_bit_mask();
+
+		vaddr += PAGE_SIZE;
+	}
+
+	flush_tlb();
+}
+
+void set_pte_encrypted(unsigned long vaddr, int npages)
+{
+	pteval_t *pte;
+	unsigned long vaddr_end = vaddr + (npages * PAGE_SIZE);
+
+	while (vaddr < vaddr_end) {
+		pte = get_pte((pgd_t *)read_cr3(), (void *)vaddr);
+
+		if (!pte)
+			assert_msg(pte, "No pte found for vaddr 0x%lx", vaddr);
+
+		/* set C-bit */
+		*pte |= get_amd_sev_c_bit_mask();
+
+		vaddr += PAGE_SIZE;
+	}
+
+	flush_tlb();
+}
+
+int pvalidate(unsigned long vaddr, bool rmp_size, bool validate)
+{
+	bool rmp_unchanged;
+	int result;
+
+	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
+		     CC_SET(c)
+		     : CC_OUT(c) (rmp_unchanged), "=a" (result)
+		     : "a" (vaddr), "c" (rmp_size), "d" (validate)
+		     : "memory", "cc");
+
+	if (rmp_unchanged)
+		return PVALIDATE_FAIL_NOUPDATE;
+
+	return result;
+}
+
+bool is_validated_private_page(unsigned long vaddr, bool rmp_size)
+{
+	int ret;
+
+	/* Attempt a PVALIDATE here for the provided page size */
+	ret = pvalidate(vaddr, rmp_size, true);
+	if (ret == PVALIDATE_FAIL_NOUPDATE)
+		return true;
+
+	/*
+	 * If PVALIDATE_FAIL_SIZEMISMATCH, entry in the RMP is 4K and
+	 * what guest is providing is a 2M entry. Therefore, fallback
+	 * to pvalidating 4K entries within 2M range.
+	 */
+	if (rmp_size && ret == PVALIDATE_FAIL_SIZEMISMATCH) {
+		unsigned long vaddr_end = vaddr + LARGE_PAGE_SIZE;
+
+		for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
+			if (ret != PVALIDATE_FAIL_NOUPDATE)
+				return false;
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
+enum es_result __sev_set_pages_state_msr_proto(unsigned long vaddr, int npages,
+					       int operation)
+{
+	unsigned long vaddr_end = vaddr + (npages * PAGE_SIZE);
+	unsigned long paddr;
+	int ret;
+	unsigned long val;
+
+	/*
+	 * GHCB may be established at this point, so save and restore the
+	 * current value which will be overwritten by the MSR protocol
+	 * request.
+	 */
+	phys_addr_t ghcb_old_msr = rdmsr(SEV_ES_GHCB_MSR_INDEX);
+
+	while (vaddr < vaddr_end) {
+		paddr = __pa(vaddr);
+
+		if (operation == SNP_PAGE_STATE_SHARED) {
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, false);
+			if (ret) {
+				printf("Failed to invalidate vaddr: 0x%lx, ret: %d\n",
+				       vaddr, ret);
+				wrmsr(SEV_ES_GHCB_MSR_INDEX, ghcb_old_msr);
+				return ES_UNSUPPORTED;
+			}
+		}
+
+		wrmsr(SEV_ES_GHCB_MSR_INDEX,
+		      GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, operation));
+
+		VMGEXIT();
+
+		val = rdmsr(SEV_ES_GHCB_MSR_INDEX);
+
+		if (GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) {
+			printf("Incorrect PSC response code: 0x%x\n",
+			       (unsigned int)GHCB_RESP_CODE(val));
+			wrmsr(SEV_ES_GHCB_MSR_INDEX, ghcb_old_msr);
+			return ES_VMM_ERROR;
+		}
+
+		if (GHCB_MSR_PSC_RESP_VAL(val)) {
+			printf("Failed to change page state to %s paddr: 0x%lx error: 0x%llx\n",
+			       operation == SNP_PAGE_STATE_PRIVATE ? "private" :
+			       "shared",
+			       paddr, GHCB_MSR_PSC_RESP_VAL(val));
+			wrmsr(SEV_ES_GHCB_MSR_INDEX, ghcb_old_msr);
+			return ES_VMM_ERROR;
+		}
+
+		if (operation == SNP_PAGE_STATE_PRIVATE) {
+			ret = pvalidate(vaddr, RMP_PG_SIZE_4K, true);
+			if (ret) {
+				printf("Failed to validate vaddr: 0x%lx, ret: %d\n",
+				       vaddr, ret);
+				wrmsr(SEV_ES_GHCB_MSR_INDEX, ghcb_old_msr);
+				return ES_UNSUPPORTED;
+			}
+		}
+
+		vaddr += PAGE_SIZE;
+	}
+
+	/* Restore old GHCB msr - setup by OVMF */
+	wrmsr(SEV_ES_GHCB_MSR_INDEX, ghcb_old_msr);
+
+	return ES_OK;
+}
diff --git a/lib/x86/amd_sev.h b/lib/x86/amd_sev.h
index 70f3763fe231..04c569be57eb 100644
--- a/lib/x86/amd_sev.h
+++ b/lib/x86/amd_sev.h
@@ -84,6 +84,16 @@ struct ghcb {
 
 #define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
 
+/* PVALIDATE return codes */
+#define PVALIDATE_FAIL_SIZEMISMATCH	6
+
+/* Software defined (when rFlags.CF = 1) */
+#define PVALIDATE_FAIL_NOUPDATE		255
+
+/* RMP page size */
+#define RMP_PG_SIZE_4K	0
+#define RMP_PG_SIZE_2M	1
+
 enum es_result {
 	ES_OK,			/* All good */
 	ES_UNSUPPORTED,		/* Requested operation not supported */
@@ -106,6 +116,13 @@
struct es_em_ctxt { struct es_fault_info fi; }; +/* + * Assign a large enough order to run SEV-SNP based tests for 4K as well + * as 2M ranges + */ +#define SEV_ALLOC_ORDER 10 +#define SEV_ALLOC_PAGE_COUNT 1 << SEV_ALLOC_ORDER + /* * AMD SEV Confidential computing blob structure. The structure is * defined in OVMF UEFI firmware header: @@ -157,15 +174,67 @@ efi_status_t setup_amd_sev(void); */ #define SEV_ES_GHCB_MSR_INDEX 0xc0010130 +#define GHCB_DATA_LOW 12 +#define GHCB_MSR_INFO_MASK (BIT_ULL(GHCB_DATA_LOW) - 1) +#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK) + +/* + * SNP Page State Change Operation + * + * GHCBData[55:52] - Page operation: + * 0x0001 Page assignment, Private + * 0x0002 Page assignment, Shared + * 0x0003 PSMASH + * 0x0004 UNSMASH + */ +enum psc_op { + SNP_PAGE_STATE_PRIVATE = 1, + SNP_PAGE_STATE_SHARED, + SNP_PAGE_STATE_PSMASH, + SNP_PAGE_STATE_UNSMASH, +}; + +#define GHCB_MSR_PSC_REQ 0x14 +#define GHCB_MSR_PSC_REQ_GFN(gfn, op) \ + /* GHCBData[55:52] */ \ + (((u64)((op) & 0xf) << 52) | \ + /* GHCBData[51:12] */ \ + ((u64)((gfn) & GENMASK_ULL(39, 0)) << 12) | \ + /* GHCBData[11:0] */ \ + GHCB_MSR_PSC_REQ) + +#define GHCB_MSR_PSC_RESP 0x15 +#define GHCB_MSR_PSC_RESP_VAL(val) \ + /* GHCBData[63:32] */ \ + (((u64)(val) & GENMASK_ULL(63, 32)) >> 32) + bool amd_sev_es_enabled(void); efi_status_t setup_vc_handler(void); bool amd_sev_snp_enabled(void); void setup_ghcb_pte(pgd_t *page_table); void handle_sev_es_vc(struct ex_regs *regs); +int pvalidate(unsigned long vaddr, bool rmp_size, bool validate); +void set_pte_decrypted(unsigned long vaddr, int npages); +void set_pte_encrypted(unsigned long vaddr, int npages); +bool is_validated_private_page(unsigned long vaddr, bool rmp_size); +enum es_result __sev_set_pages_state_msr_proto(unsigned long vaddr, + int npages, int operation); unsigned long long get_amd_sev_c_bit_mask(void); unsigned long long get_amd_sev_addr_upperbound(void); +/* + * Macros to generate condition code outputs from inline 
assembly, + * The output operand must be type "bool". + */ +#ifdef __GCC_ASM_FLAG_OUTPUTS__ +# define CC_SET(c) "\n\t/* output condition code " #c "*/\n" +# define CC_OUT(c) "=@cc" #c +#else +# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n" +# define CC_OUT(c)[_cc_ ## c] "=qm" +#endif + /* GHCB Accessor functions from Linux's include/asm/svm.h */ #define GHCB_BITMAP_IDX(field) \ (offsetof(struct ghcb_save_area, field) / sizeof(u64)) diff --git a/lib/x86/vm.h b/lib/x86/vm.h index cf39787aa8b0..a5bd8d4ecf7c 100644 --- a/lib/x86/vm.h +++ b/lib/x86/vm.h @@ -7,6 +7,9 @@ #include "asm/io.h" #include "asm/bitops.h" +#define ORDER_4K 0 +#define ORDER_2M 9 + void setup_5level_page_table(void); struct pte_search { diff --git a/x86/amd_sev.c b/x86/amd_sev.c index 4c34a5965a1b..3b1593e42634 100644 --- a/x86/amd_sev.c +++ b/x86/amd_sev.c @@ -14,6 +14,9 @@ #include "x86/processor.h" #include "x86/amd_sev.h" #include "msr.h" +#include "vmalloc.h" +#include "x86/vm.h" +#include "alloc_page.h" #define EXIT_SUCCESS 0 #define EXIT_FAILURE 1 @@ -128,6 +131,85 @@ static void test_stringio(void) report((got & 0xff00) >> 8 == st1[sizeof(st1) - 2], "outsb up"); } +static enum es_result sev_set_pages_state_msr_proto(unsigned long vaddr, + int npages, int operation) +{ + efi_status_t status; + + vaddr &= PAGE_MASK; + + if (operation == SNP_PAGE_STATE_SHARED) { + status = __sev_set_pages_state_msr_proto(vaddr, npages, operation); + + if (status != ES_OK) { + printf("Page state change (private->shared) failure"); + return status; + } + + set_pte_decrypted(vaddr, npages); + } else { + set_pte_encrypted(vaddr, npages); + + status = __sev_set_pages_state_msr_proto(vaddr, npages, operation); + + if (status != ES_OK) { + printf("Page state change (shared->private) failure.\n"); + return status; + } + } + + return ES_OK; +} + +static int test_write(unsigned long vaddr, int npages) +{ + unsigned long vaddr_end = vaddr + (npages << PAGE_SHIFT); + + while (vaddr < vaddr_end) { + memcpy((void 
*)vaddr, st1, strnlen(st1, PAGE_SIZE)); + vaddr += PAGE_SIZE; + } + + return 0; +} + +static void test_sev_psc_ghcb_msr(void) +{ + void *vaddr; + efi_status_t status; + + report_info("TEST: GHCB MSR based Page state change test"); + + vaddr = alloc_pages(SEV_ALLOC_ORDER); + force_4k_page(vaddr); + + report(is_validated_private_page((unsigned long)vaddr, RMP_PG_SIZE_4K), + "Expected page state: Private"); + + status = sev_set_pages_state_msr_proto((unsigned long)vaddr, + SEV_ALLOC_PAGE_COUNT, + SNP_PAGE_STATE_SHARED); + + report(status == ES_OK, "Private->Shared Page state change for %d pages", + SEV_ALLOC_PAGE_COUNT); + + /* + * Access the now-shared page(s) with C-bit cleared and ensure + * writes to these pages are successful + */ + report(!test_write((unsigned long)vaddr, SEV_ALLOC_PAGE_COUNT), + "Write to %d unencrypted 4K pages after private->shared conversion", + (SEV_ALLOC_PAGE_COUNT) / (1 << ORDER_4K)); + + /* convert the pages back to private after PSC */ + status = sev_set_pages_state_msr_proto((unsigned long)vaddr, + SEV_ALLOC_PAGE_COUNT, + SNP_PAGE_STATE_PRIVATE); + + /* Free up all the pages */ + free_pages_by_order(vaddr, SEV_ALLOC_ORDER); +} + int main(void) { int rtn; @@ -136,5 +218,11 @@ int main(void) test_sev_es_activation(); test_sev_snp_activation(); test_stringio(); + + /* Setup a new page table via setup_vm() */ + setup_vm(); + if (amd_sev_snp_enabled()) + test_sev_psc_ghcb_msr(); + return report_summary(); } -- 2.34.1