On Sat, Dec 30, 2023 at 10:19:38AM -0600, Michael Roth wrote: > +static int rmpupdate(u64 pfn, struct rmp_state *state) > +{ > + unsigned long paddr = pfn << PAGE_SHIFT; > + int ret; > + > + if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP)) > + return -ENODEV; > + Let's document this deal here and leave the door open for potential future improvements: --- diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c index c9156ce47c77..1fbf9843c163 100644 --- a/arch/x86/virt/svm/sev.c +++ b/arch/x86/virt/svm/sev.c @@ -374,6 +374,21 @@ static int rmpupdate(u64 pfn, struct rmp_state *state) if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP)) return -ENODEV; + /* + * It is expected that those operations are rare enough so + * that no mutual exclusion of updaters is needed and thus the + * overlap error condition below should happen very seldom and + * would get resolved relatively quickly by the firmware. + * + * If not, one could consider introducing a mutex or so here to + * sync concurrent RMP updates and thus diminish the number of + * cases where firmware needs to lock 2M ranges to protect + * against concurrent updates. + * + * The optimal solution would be range locking to avoid locking + * disjoint regions unnecessarily but there's no support for + * that yet. + */ do { /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */ asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE" > + do { > + /* Binutils version 2.36 supports the RMPUPDATE mnemonic. */ > + asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFE" > + : "=a" (ret) > + : "a" (paddr), "c" ((unsigned long)state) > + : "memory", "cc"); > + } while (ret == RMPUPDATE_FAIL_OVERLAP); > + > + if (ret) { > + pr_err("RMPUPDATE failed for PFN %llx, ret: %d\n", pfn, ret); > + dump_rmpentry(pfn); > + dump_stack(); > + return -EFAULT; > + } > + > + return 0; > +} -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette