On Tue, Jul 02, 2024 at 07:58:11PM +0000, Ashish Kalra wrote:
> +static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
> +{
> +	struct sev_es_runtime_data *data;
> +	struct ghcb *ghcb;
> +	int cpu;
> +
> +	/*
> +	 * Ensure that all the per-cpu GHCBs are made private
> +	 * at the end of unshared loop so that we continue to use the
> +	 * optimized GHCB protocol and not force the switch to
> +	 * MSR protocol till the very end.
> +	 */
> +	for_each_possible_cpu(cpu) {
> +		data = per_cpu(runtime_data, cpu);
> +		ghcb = &data->ghcb_page;
> +		/* Check for GHCB for being part of a PMD range */
> +		if ((unsigned long)ghcb >= addr &&
> +		    (unsigned long)ghcb <= (addr + (pages * PAGE_SIZE)))
> +			return true;
> +	}
> +
> +	set_pte_enc(pte, level, (void *)addr);
> +	snp_set_memory_private(addr, pages);
> +
> +	return true;

Zap make_pte_private() and do it all in unshare_all_memory(). Note that
the per-CPU GHCB check needs a skipped_addr flag so that it actually
skips converting the range it is inspecting - a bare continue in the
inner CPU loop would not:

diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index f263ceada006..65234ffb1495 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -1022,39 +1022,15 @@ static void set_pte_enc(pte_t *kpte, int level, void *va)
 	set_pte_enc_mask(kpte, d.pfn, d.new_pgprot);
 }
 
-static bool make_pte_private(pte_t *pte, unsigned long addr, int pages, int level)
-{
-	struct sev_es_runtime_data *data;
-	struct ghcb *ghcb;
-	int cpu;
-
-	/*
-	 * Ensure that all the per-cpu GHCBs are made private
-	 * at the end of unshared loop so that we continue to use the
-	 * optimized GHCB protocol and not force the switch to
-	 * MSR protocol till the very end.
-	 */
-	for_each_possible_cpu(cpu) {
-		data = per_cpu(runtime_data, cpu);
-		ghcb = &data->ghcb_page;
-		/* Check for GHCB for being part of a PMD range */
-		if ((unsigned long)ghcb >= addr &&
-		    (unsigned long)ghcb <= (addr + (pages * PAGE_SIZE)))
-			return true;
-	}
-
-	set_pte_enc(pte, level, (void *)addr);
-	snp_set_memory_private(addr, pages);
-
-	return true;
-}
-
 /* Walk the direct mapping and convert all shared memory back to private. */
 static void unshare_all_memory(void)
 {
-	unsigned long addr, end, size;
+	unsigned long addr, end, size, ghcb;
+	struct sev_es_runtime_data *data;
 	unsigned int npages, level;
+	bool skipped_addr;
 	pte_t *pte;
+	int cpu;
 
 	/* Unshare the direct mapping. */
 	addr = PAGE_OFFSET;
@@ -1063,17 +1038,33 @@ static void unshare_all_memory(void)
 	while (addr < end) {
 		pte = lookup_address(addr, &level);
 		size = page_level_size(level);
+		npages = size / PAGE_SIZE;
 
 		if (!pte || !pte_decrypted(*pte) || pte_none(*pte)) {
 			addr += size;
 			continue;
 		}
 
-		npages = size / PAGE_SIZE;
+		/*
+		 * Ensure that all the per-CPU GHCBs are made private at the
+		 * end of the unsharing loop so that the switch to the slower
+		 * MSR protocol happens last.
+		 */
+		skipped_addr = false;
+		for_each_possible_cpu(cpu) {
+			data = per_cpu(runtime_data, cpu);
+			ghcb = (unsigned long)&data->ghcb_page;
+
+			if (addr <= ghcb && ghcb <= addr + size) {
+				skipped_addr = true;
+				break;
+			}
+		}
 
-		if (!make_pte_private(pte, addr, npages, level))
-			pr_err("Failed to unshare range %#lx-%#lx\n",
-			       addr, addr + size);
+		if (!skipped_addr) {
+			set_pte_enc(pte, level, (void *)addr);
+			snp_set_memory_private(addr, npages);
+		}
 	}
 
 	/* Unshare all bss decrypted memory. */

-- 
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
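
For illustration, here is a minimal, self-contained user-space sketch of
the control flow discussed above. Everything in it is a mock: the
range[] table, ghcb_addr[] and convert_private() merely stand in for
the direct-map walk, the per-CPU GHCB pages and
snp_set_memory_private(). It shows why the inner CPU loop needs the
flag: a bare continue there only advances the CPU loop and skips
nothing, while the flag-and-break pattern converts all non-GHCB ranges
first and leaves the GHCB-containing ranges for a final pass.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS		2
#define NR_RANGES	4

/* Mock "addresses" of the per-CPU GHCB pages. */
static unsigned long ghcb_addr[NR_CPUS] = { 0x2000, 0x5000 };

/* Mock shared ranges of the direct map; [addr, addr + size) is half-open. */
static struct {
	unsigned long addr, size;
} range[NR_RANGES] = {
	{ 0x1000, 0x1000 },
	{ 0x2000, 0x1000 },	/* contains CPU0's GHCB */
	{ 0x4000, 0x2000 },	/* contains CPU1's GHCB */
	{ 0x7000, 0x1000 },
};

/* Stand-in for set_pte_enc() + snp_set_memory_private(). */
static void convert_private(unsigned long addr, unsigned long size)
{
	printf("converting %#lx-%#lx to private\n", addr, addr + size);
}

static bool contains_ghcb(int i, int cpu)
{
	return range[i].addr <= ghcb_addr[cpu] &&
	       ghcb_addr[cpu] < range[i].addr + range[i].size;
}

int main(void)
{
	bool skipped_addr;
	int i, cpu;

	/* First pass: convert every range that does not hold a GHCB. */
	for (i = 0; i < NR_RANGES; i++) {
		skipped_addr = false;

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (contains_ghcb(i, cpu)) {
				skipped_addr = true;
				break;	/* a bare continue would skip nothing */
			}
		}

		if (!skipped_addr)
			convert_private(range[i].addr, range[i].size);
	}

	/*
	 * Last pass: the GHCB ranges, so that the faster GHCB protocol
	 * stays usable for as long as possible before the fallback to
	 * the MSR protocol.
	 */
	for (i = 0; i < NR_RANGES; i++) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (contains_ghcb(i, cpu)) {
				convert_private(range[i].addr, range[i].size);
				break;
			}
		}
	}

	return 0;
}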