Re: [PATCH v10 21/45] x86/mm: Add support to validate memory when changing C-bit

On Fri, Feb 11, 2022 at 03:55:23PM +0100, Borislav Petkov wrote:
>> Also, I think adding required functions to x86_platform.guest. is a very
>> nice way to solve the ugly if (guest_type) querying all over the place.

> So I guess something like below. It builds here...

I made a small change to use prepare() and finish(). The idea is that prepare()
will be called before the encryption mask is changed in the page table, and
finish() will be called after the change.
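Here is a minimal userspace sketch (toy code, not kernel code) of the ordering
contract the two hooks are meant to provide. The struct layout and the
prepare -> flip -> finish sequence mirror the patch below; toy_guest,
toy_prepare(), toy_finish() and toy_change_c_bit() are invented stand-ins for
x86_platform.guest, the AMD callbacks and __change_page_attr_set_clr():

#include <stdbool.h>
#include <stdio.h>

struct toy_guest {
	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
	void (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
};

static void toy_prepare(unsigned long vaddr, int npages, bool enc)
{
	/* e.g. SNP would make the pages shared before the C-bit is cleared */
	printf("prepare: vaddr=%#lx npages=%d enc=%d\n", vaddr, npages, enc);
}

static void toy_finish(unsigned long vaddr, int npages, bool enc)
{
	/* e.g. SNP would validate the pages after the C-bit is set */
	printf("finish:  vaddr=%#lx npages=%d enc=%d\n", vaddr, npages, enc);
}

static struct toy_guest guest = {
	.enc_status_change_prepare = toy_prepare,
	.enc_status_change_finish  = toy_finish,
};

/* Stand-in for __change_page_attr_set_clr() flipping the encryption mask. */
static int toy_change_c_bit(unsigned long vaddr, int npages, bool enc)
{
	printf("flip C-bit: vaddr=%#lx npages=%d enc=%d\n", vaddr, npages, enc);
	return 0;
}

int main(void)
{
	unsigned long vaddr = 0x100000UL;
	int npages = 4;
	bool enc = false;	/* private -> shared */
	int ret;

	/* prepare() strictly before the page table change ... */
	guest.enc_status_change_prepare(vaddr, npages, enc);

	ret = toy_change_c_bit(vaddr, npages, enc);

	/* ... and finish() only if the change actually succeeded. */
	if (!ret)
		guest.enc_status_change_finish(vaddr, npages, enc);

	return ret;
}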

I have not tried integrating the SNP PSC yet, but I want to check whether this
approach will work for Kirill.
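
And if this flies, wiring in another guest type should just be another pair of
assignments at init time. A hypothetical TDX-side sketch (the tdx_* names and
the hypercall details in the comments are my guesses, not Kirill's actual code):

/* Hypothetical sketch only -- not from Kirill's series. */
static void tdx_enc_status_change_prepare(unsigned long vaddr, int npages,
					  bool enc)
{
	/* e.g. MapGPA to shared before the shared bit is set in the PTEs */
}

static void tdx_enc_status_change_finish(unsigned long vaddr, int npages,
					 bool enc)
{
	/* e.g. re-accept the pages after a shared -> private conversion */
}

void __init tdx_early_init(void)
{
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
}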

---
 arch/x86/include/asm/set_memory.h |  1 -
 arch/x86/include/asm/sev.h        |  3 +++
 arch/x86/include/asm/x86_init.h   | 15 +++++++++++++++
 arch/x86/kernel/sev.c             |  3 +++
 arch/x86/mm/mem_encrypt_amd.c     | 12 +++++++++---
 arch/x86/mm/pat/set_memory.c      | 13 +++++++------
 6 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index ff0f2d90338a..ce8dd215f5b3 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -84,7 +84,6 @@ int set_pages_rw(struct page *page, int numpages);
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);
 
 extern int kernel_set_to_readonly;
 
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index ec060c433589..2ebd8c225257 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -87,6 +87,9 @@ extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
 					  struct es_em_ctxt *ctxt,
 					  u64 exit_code, u64 exit_info_1,
 					  u64 exit_info_2);
+void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc);
+void amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc);
+
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 22b7412c08f6..da7fc1c0b917 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -141,6 +141,20 @@ struct x86_init_acpi {
 	void (*reduced_hw_early_init)(void);
 };
 
+/**
+ * struct x86_guest - Functions used by misc guest incarnations like SEV, TDX, etc.
+ *
+ * @enc_status_change_prepare	Notify HV before the encryption status of a range
+ * 				is changed.
+ *
+ * @enc_status_change_finish	Notify HV after the encryption status of a range
+ * 				is changed.
+ */
+struct x86_guest {
+	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	void (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
+};
+
 /**
  * struct x86_init_ops - functions for platform specific setup
  *
@@ -287,6 +301,7 @@ struct x86_platform_ops {
 	struct x86_legacy_features legacy;
 	void (*set_legacy_features)(void);
 	struct x86_hyper_runtime hyper;
+	struct x86_guest guest;
 };
 
 struct x86_apic_ops {
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e6d316a01fdd..3b2133fd6682 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -760,6 +760,9 @@ void __init sev_es_init_vc_handling(void)
 
 	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
 
+	x86_platform.guest.enc_status_change_prepare = amd_enc_status_change_prepare;
+	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
+
 	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		return;
 
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 2b2d018ea345..c93b6c2fc6a3 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -256,7 +256,11 @@ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 	return pfn;
 }
 
-void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+{
+}
+
+void amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
 #ifdef CONFIG_PARAVIRT
 	unsigned long sz = npages << PAGE_SHIFT;
@@ -341,6 +345,8 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 	vaddr_next = vaddr;
 	vaddr_end = vaddr + size;
 
+	amd_enc_status_change_prepare(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+
 	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
 		kpte = lookup_address(vaddr, &level);
 		if (!kpte || pte_none(*kpte)) {
@@ -392,7 +398,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
-	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
+	amd_enc_status_change_finish(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -410,7 +416,7 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 
 void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
-	notify_range_enc_status_changed(vaddr, npages, enc);
+	amd_enc_status_change_finish(vaddr, npages, enc);
 }
 
 void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index b4072115c8ef..a55477a6e578 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -2012,8 +2012,15 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));
 
+	/* Notify HV that we are about to set/clr encryption attribute. */
+	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
+	/* Notify HV that we have successfully set/clr encryption attribute. */
+	if (!ret)
+		x86_platform.guest.enc_status_change_finish(addr, numpages, enc);
+
 	/*
 	 * After changing the encryption attribute, we need to flush TLBs again
 	 * in case any speculative TLB caching occurred (but no need to flush
@@ -2023,12 +2030,6 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
-	/*
-	 * Notify hypervisor that a given memory range is mapped encrypted
-	 * or decrypted.
-	 */
-	notify_range_enc_status_changed(addr, numpages, enc);
-
 	return ret;
 }
 
-- 
2.25.1





