On Mon, Feb 20, 2023 at 12:38:06PM -0600, Michael Roth wrote:
> From: Brijesh Singh <brijesh.singh@xxxxxxx>
>
> The integrity guarantee of SEV-SNP is enforced through the RMP table.
> The RMP is used with standard x86 and IOMMU page tables to enforce
> memory restrictions and page access rights. The RMP check is enforced as
> soon as SEV-SNP is enabled globally in the system. When hardware
> encounters an RMP-check failure, it raises a page-fault exception.
>
> The rmp_make_private() and rmp_make_shared() helpers are used to add
> or remove the pages from the RMP table. Improve rmp_make_private()
> to invalidate state so that pages cannot be used in the direct-map after
> they are added to the RMP table, and are restored to their default valid
> permission after the pages are removed from the RMP table.
>
> Co-developed-by: Ashish Kalra <ashish.kalra@xxxxxxx>
> Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
> Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
> Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
> ---
>  arch/x86/kernel/sev.c | 57 +++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 57 insertions(+)
>
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index a49f30c10dc1..3e5ff5934e83 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -2595,6 +2595,37 @@ int psmash(u64 pfn)
>  }
>  EXPORT_SYMBOL_GPL(psmash);
>
> +static int restore_direct_map(u64 pfn, int npages)
> +{
> +        int i, ret = 0;
> +
> +        for (i = 0; i < npages; i++) {
> +                ret = set_direct_map_default_noflush(pfn_to_page(pfn + i));
> +                if (ret)
> +                        goto cleanup;
> +        }
> +
> +cleanup:
> +        WARN(ret > 0, "Failed to restore direct map for pfn 0x%llx\n", pfn + i);
> +        return ret;
> +}
> +
> +static int invalidate_direct_map(u64 pfn, int npages)
> +{
> +        int i, ret = 0;
> +
> +        for (i = 0; i < npages; i++) {
> +                ret = set_direct_map_invalid_noflush(pfn_to_page(pfn + i));
> +                if (ret)
> +                        goto cleanup;
> +        }
> +
> +cleanup:
> +        WARN(ret > 0, "Failed to invalidate direct map for pfn 0x%llx\n", pfn + i);
> +        restore_direct_map(pfn, i);

This immediately restores the direct map after invalidating it. It
probably needs to be put behind if (ret); see the untested sketch at the
end of this mail.

Regards,
Tom

> +        return ret;
> +}
> +
>  static int rmpupdate(u64 pfn, struct rmp_state *val)
>  {
>          int max_attempts = 4 * num_present_cpus();
> @@ -2605,6 +2636,21 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
>          if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
>                  return -ENXIO;
>
> +        level = RMP_TO_X86_PG_LEVEL(val->pagesize);
> +        npages = page_level_size(level) / PAGE_SIZE;
> +
> +        /*
> +         * If page is getting assigned in the RMP table then unmap it from the
> +         * direct map.
> +         */
> +        if (val->assigned) {
> +                if (invalidate_direct_map(pfn, npages)) {
> +                        pr_err("Failed to unmap %d pages at pfn 0x%llx from the direct_map\n",
> +                               npages, pfn);
> +                        return -EFAULT;
> +                }
> +        }
> +
>          do {
>                  /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */
>                  asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE"
> @@ -2630,6 +2676,17 @@ static int rmpupdate(u64 pfn, struct rmp_state *val)
>                           attempts, val->asid, ret, pfn, npages);
>          }
>
> +        /*
> +         * Restore the direct map after the page is removed from the RMP table.
> +         */
> +        if (!val->assigned) {
> +                if (restore_direct_map(pfn, npages)) {
> +                        pr_err("Failed to map %d pages at pfn 0x%llx into the direct_map\n",
> +                               npages, pfn);
> +                        return -EFAULT;
> +                }
> +        }
> +
>          return 0;
>  }
>
> --
> 2.25.1
>
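
For clarity, an untested sketch of what that suggested change to
invalidate_direct_map()'s cleanup path could look like, i.e. only warn and
roll back the pages that were already invalidated when one of the
set_direct_map_invalid_noflush() calls fails:

static int invalidate_direct_map(u64 pfn, int npages)
{
        int i, ret = 0;

        for (i = 0; i < npages; i++) {
                ret = set_direct_map_invalid_noflush(pfn_to_page(pfn + i));
                if (ret)
                        goto cleanup;
        }

cleanup:
        if (ret) {
                /* Only reached on failure: undo the pages invalidated so far. */
                WARN(1, "Failed to invalidate direct map for pfn 0x%llx\n", pfn + i);
                restore_direct_map(pfn, i);
        }

        return ret;
}

The success path then leaves the direct map invalidated, which appears to
be the intent of calling invalidate_direct_map() before the pages are
marked assigned in the RMP table.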