The set_memory_{encrypt,decrypt}() are used for changing the pages from decrypted (shared) to encrypted (private) and vice versa. When SEV-SNP is active, the page state transition needs to go through additional steps. If the page is transitioned from shared to private, then perform the following after the encryption attribute is set in the page table: 1. Issue the page state change VMGEXIT to add the memory region in the RMP table. 2. Validate the memory region after the RMP entry is added. To maintain the security guarantees, if the page is transitioned from private to shared, then perform the following before encryption attribute is removed from the page table: 1. Invalidate the page. 2. Issue the page state change VMGEXIT to remove the page from RMP table. To change the page state in the RMP table, use the Page State Change VMGEXIT defined in the GHCB spec section 4.1.6. Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx> Cc: Ingo Molnar <mingo@xxxxxxxxxx> Cc: Borislav Petkov <bp@xxxxxxxxx> Cc: Joerg Roedel <jroedel@xxxxxxx> Cc: "H. 
Peter Anvin" <hpa@xxxxxxxxx> Cc: Tony Luck <tony.luck@xxxxxxxxx> Cc: Dave Hansen <dave.hansen@xxxxxxxxx> Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx> Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx> Cc: Tom Lendacky <thomas.lendacky@xxxxxxx> Cc: David Rientjes <rientjes@xxxxxxxxxx> Cc: Sean Christopherson <seanjc@xxxxxxxxxx> Cc: x86@xxxxxxxxxx Cc: kvm@xxxxxxxxxxxxxxx Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx> --- arch/x86/include/asm/sev-es.h | 2 + arch/x86/include/asm/sev-snp.h | 4 ++ arch/x86/kernel/sev-es.c | 7 +++ arch/x86/kernel/sev-snp.c | 106 +++++++++++++++++++++++++++++++++ arch/x86/mm/pat/set_memory.c | 19 ++++++ 5 files changed, 138 insertions(+) diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev-es.h index 33838a8f8495..8715e41e2c8f 100644 --- a/arch/x86/include/asm/sev-es.h +++ b/arch/x86/include/asm/sev-es.h @@ -109,6 +109,7 @@ static __always_inline void sev_es_nmi_complete(void) extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd); extern struct ghcb *sev_es_get_ghcb(struct ghcb_state *state); extern void sev_es_put_ghcb(struct ghcb_state *state); +extern int vmgexit_page_state_change(struct ghcb *ghcb, void *data); #else static inline void sev_es_ist_enter(struct pt_regs *regs) { } @@ -118,6 +119,7 @@ static inline void sev_es_nmi_complete(void) { } static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; } static inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state) { return NULL; } static inline void sev_es_put_ghcb(struct ghcb_state *state) { } +static inline int vmgexit_page_state_change(struct ghcb *ghcb, void *data) { return 0; } #endif #endif diff --git a/arch/x86/include/asm/sev-snp.h b/arch/x86/include/asm/sev-snp.h index c4b096206062..59b57a5f6524 100644 --- a/arch/x86/include/asm/sev-snp.h +++ b/arch/x86/include/asm/sev-snp.h @@ -90,6 +90,8 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd unsigned int npages); void __init early_snp_set_memory_shared(unsigned long 
vaddr, unsigned long paddr, unsigned int npages); +int snp_set_memory_shared(unsigned long vaddr, unsigned int npages); +int snp_set_memory_private(unsigned long vaddr, unsigned int npages); #else /* !CONFIG_AMD_MEM_ENCRYPT */ @@ -110,6 +112,8 @@ early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned i { return 0; } +static inline int snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { return 0; } +static inline int snp_set_memory_private(unsigned long vaddr, unsigned int npages) { return 0; } #endif /* CONFIG_AMD_MEM_ENCRYPT */ diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c index d4957b3fc43f..7309be685440 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -586,6 +586,13 @@ static bool __init sev_es_setup_ghcb(void) return true; } +int vmgexit_page_state_change(struct ghcb *ghcb, void *data) +{ + ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); + + return sev_es_ghcb_hv_call(ghcb, NULL, SVM_VMGEXIT_PAGE_STATE_CHANGE, 0, 0); +} + #ifdef CONFIG_HOTPLUG_CPU static void sev_es_ap_hlt_loop(void) { diff --git a/arch/x86/kernel/sev-snp.c b/arch/x86/kernel/sev-snp.c index ff9b35bfb05c..d236089c0739 100644 --- a/arch/x86/kernel/sev-snp.c +++ b/arch/x86/kernel/sev-snp.c @@ -15,6 +15,7 @@ #include <asm/sev-es.h> #include <asm/sev-snp.h> +#include <asm/svm.h> static inline u64 sev_es_rd_ghcb_msr(void) { @@ -161,3 +162,108 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr /* Ask hypervisor to make the memory shared in the RMP table. */ early_snp_set_page_state(paddr, npages, SNP_PAGE_STATE_SHARED); } + +static int snp_page_state_vmgexit(struct ghcb *ghcb, struct snp_page_state_change *data) +{ + struct snp_page_state_header *hdr; + int ret = 0; + + hdr = &data->header; + + /* + * The hypervisor can return before processing all the entries, the loop below retries + * until all the entries are processed. 
+ */ + while (hdr->cur_entry <= hdr->end_entry) { + ghcb_set_sw_scratch(ghcb, (u64)__pa(data)); + ret = vmgexit_page_state_change(ghcb, data); + /* Page State Change VMGEXIT can pass error code through exit_info_2. */ + if (ret || ghcb->save.sw_exit_info_2) + break; + } + + return ret; +} + +static void snp_set_page_state(unsigned long paddr, unsigned int npages, int op) +{ + unsigned long paddr_end, paddr_next; + struct snp_page_state_change *data; + struct snp_page_state_header *hdr; + struct snp_page_state_entry *e; + struct ghcb_state state; + struct ghcb *ghcb; + int ret, idx; + + paddr = paddr & PAGE_MASK; + paddr_end = paddr + (npages << PAGE_SHIFT); + + ghcb = sev_es_get_ghcb(&state); + + data = (struct snp_page_state_change *)ghcb->shared_buffer; + hdr = &data->header; + e = &(data->entry[0]); + memset(data, 0, sizeof (*data)); + + for (idx = 0; paddr < paddr_end; paddr = paddr_next) { + int level = PG_LEVEL_4K; + + /* If we cannot fit more requests, then issue the VMGEXIT before going further. */ + if (hdr->end_entry == (SNP_PAGE_STATE_CHANGE_MAX_ENTRY - 1)) { + ret = snp_page_state_vmgexit(ghcb, data); + if (ret) + goto e_fail; + + idx = 0; + memset(data, 0, sizeof (*data)); + e = &(data->entry[0]); + } + + hdr->end_entry = idx; + e->gfn = paddr >> PAGE_SHIFT; + e->operation = op; + e->pagesize = X86_RMP_PG_LEVEL(level); + e++; + idx++; + paddr_next = paddr + page_level_size(level); + } + + /* + * We can exit the above loop before issuing the VMGEXIT; if we exited before calling the + * VMGEXIT, then issue the VMGEXIT now. + */ + if (idx) + ret = snp_page_state_vmgexit(ghcb, data); + + sev_es_put_ghcb(&state); + return; + +e_fail: + /* Dump stack for debugging purposes */ + dump_stack(); + + /* Ask to terminate the guest */ + sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST); +} + +int snp_set_memory_shared(unsigned long vaddr, unsigned int npages) +{ + /* Invalidate the memory before changing the page state in the RMP table. 
*/ + sev_snp_issue_pvalidate(vaddr, npages, false); + + /* Change the page state in the RMP table. */ + snp_set_page_state(__pa(vaddr), npages, SNP_PAGE_STATE_SHARED); + + return 0; +} + +int snp_set_memory_private(unsigned long vaddr, unsigned int npages) +{ + /* Change the page state in the RMP table. */ + snp_set_page_state(__pa(vaddr), npages, SNP_PAGE_STATE_PRIVATE); + + /* Validate the memory after the memory is made private in the RMP table. */ + sev_snp_issue_pvalidate(vaddr, npages, true); + + return 0; +} diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 16f878c26667..19ee18ddbc37 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -27,6 +27,8 @@ #include <asm/proto.h> #include <asm/memtype.h> #include <asm/set_memory.h> +#include <asm/mem_encrypt.h> +#include <asm/sev-snp.h> #include "../mm_internal.h" @@ -2001,8 +2003,25 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) */ cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT)); + /* + * To maintain the security guarantees of an SEV-SNP guest, invalidate the memory before + * clearing the encryption attribute. + */ + if (sev_snp_active() && !enc) { + ret = snp_set_memory_shared(addr, numpages); + if (ret) + return ret; + } + ret = __change_page_attr_set_clr(&cpa, 1); + /* + * Now that memory is mapped encrypted in the page table, validate the memory range before + * we return from here. + */ + if (!ret && sev_snp_active() && enc) + ret = snp_set_memory_private(addr, numpages); + /* * After changing the encryption attribute, we need to flush TLBs again * in case any speculative TLB caching occurred (but no need to flush -- 2.17.1