Looking at the VMWARE_VMCALL(cmd, eax, ebx, ecx, edx) definition, it seems to
me that only four registers need to be shared with the hypervisor. I don't
know much about VMware, but doesn't vmware_sev_es_hcall_prepare expose more
registers than needed, and might vmware_sev_es_hcall_finish also let the
hypervisor modify additional registers that are never used? Just checking
whether this is intentional and what I am missing here. To illustrate what I
mean, there is a rough sketch below the quoted patch.

Thanks
-Erdem

On Thu, Sep 10, 2020 at 2:23 AM tip-bot2 for Doug Covelli
<tip-bot2@xxxxxxxxxxxxx> wrote:
>
> The following commit has been merged into the x86/seves branch of tip:
>
> Commit-ID:     1a222de8dcfb903d039810b0823570ee0be4e6c6
> Gitweb:        https://git.kernel.org/tip/1a222de8dcfb903d039810b0823570ee0be4e6c6
> Author:        Doug Covelli <dcovelli@xxxxxxxxxx>
> AuthorDate:    Mon, 07 Sep 2020 15:16:05 +02:00
> Committer:     Borislav Petkov <bp@xxxxxxx>
> CommitterDate: Wed, 09 Sep 2020 11:33:20 +02:00
>
> x86/vmware: Add VMware-specific handling for VMMCALL under SEV-ES
>
> Add VMware-specific handling for #VC faults caused by VMMCALL
> instructions.
>
> Signed-off-by: Doug Covelli <dcovelli@xxxxxxxxxx>
> Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
> [ jroedel@xxxxxxx: - Adapt to different paravirt interface ]
> Co-developed-by: Joerg Roedel <jroedel@xxxxxxx>
> Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
> Signed-off-by: Borislav Petkov <bp@xxxxxxx>
> Link: https://lkml.kernel.org/r/20200907131613.12703-65-joro@xxxxxxxxxx
> ---
>  arch/x86/kernel/cpu/vmware.c | 50 +++++++++++++++++++++++++++++++----
>  1 file changed, 45 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
> index 9b6fafa..924571f 100644
> --- a/arch/x86/kernel/cpu/vmware.c
> +++ b/arch/x86/kernel/cpu/vmware.c
> @@ -33,6 +33,7 @@
>  #include <asm/timer.h>
>  #include <asm/apic.h>
>  #include <asm/vmware.h>
> +#include <asm/svm.h>
>
>  #undef pr_fmt
>  #define pr_fmt(fmt)	"vmware: " fmt
> @@ -476,10 +477,49 @@ static bool __init vmware_legacy_x2apic_available(void)
>  		(eax & (1 << VMWARE_CMD_LEGACY_X2APIC)) != 0;
>  }
>
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +static void vmware_sev_es_hcall_prepare(struct ghcb *ghcb,
> +					struct pt_regs *regs)
> +{
> +	/* Copy VMWARE specific Hypercall parameters to the GHCB */
> +	ghcb_set_rip(ghcb, regs->ip);
> +	ghcb_set_rbx(ghcb, regs->bx);
> +	ghcb_set_rcx(ghcb, regs->cx);
> +	ghcb_set_rdx(ghcb, regs->dx);
> +	ghcb_set_rsi(ghcb, regs->si);
> +	ghcb_set_rdi(ghcb, regs->di);
> +	ghcb_set_rbp(ghcb, regs->bp);
> +}
> +
> +static bool vmware_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
> +{
> +	if (!(ghcb_rbx_is_valid(ghcb) &&
> +	      ghcb_rcx_is_valid(ghcb) &&
> +	      ghcb_rdx_is_valid(ghcb) &&
> +	      ghcb_rsi_is_valid(ghcb) &&
> +	      ghcb_rdi_is_valid(ghcb) &&
> +	      ghcb_rbp_is_valid(ghcb)))
> +		return false;
> +
> +	regs->bx = ghcb->save.rbx;
> +	regs->cx = ghcb->save.rcx;
> +	regs->dx = ghcb->save.rdx;
> +	regs->si = ghcb->save.rsi;
> +	regs->di = ghcb->save.rdi;
> +	regs->bp = ghcb->save.rbp;
> +
> +	return true;
> +}
> +#endif
> +
>  const __initconst struct hypervisor_x86 x86_hyper_vmware = {
> -	.name			= "VMware",
> -	.detect			= vmware_platform,
> -	.type			= X86_HYPER_VMWARE,
> -	.init.init_platform	= vmware_platform_setup,
> -	.init.x2apic_available	= vmware_legacy_x2apic_available,
> +	.name				= "VMware",
> +	.detect				= vmware_platform,
> +	.type				= X86_HYPER_VMWARE,
> +	.init.init_platform		= vmware_platform_setup,
> +	.init.x2apic_available		= vmware_legacy_x2apic_available,
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	.runtime.sev_es_hcall_prepare	= vmware_sev_es_hcall_prepare,
> +	.runtime.sev_es_hcall_finish	= vmware_sev_es_hcall_finish,
> +#endif
>  };
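
To make the question concrete, here is the narrower variant I had in mind.
This is an illustration only, not a patch: the *_minimal names are made up,
it assumes the same includes as vmware.c (<asm/svm.h> for the GHCB accessors,
<asm/ptrace.h> for struct pt_regs), and it assumes the common #VC VMMCALL
handling already takes care of RAX, so only the remaining GPRs that
VMWARE_VMCALL() actually passes are shared with the hypervisor:

#ifdef CONFIG_AMD_MEM_ENCRYPT
static void vmware_sev_es_hcall_prepare_minimal(struct ghcb *ghcb,
						struct pt_regs *regs)
{
	/* RIP is copied as in the merged patch. */
	ghcb_set_rip(ghcb, regs->ip);

	/* Expose only the registers VMWARE_VMCALL() passes (besides EAX). */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
}

static bool vmware_sev_es_hcall_finish_minimal(struct ghcb *ghcb,
					       struct pt_regs *regs)
{
	/* Accept only the registers the guest expects to get back. */
	if (!(ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return false;

	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return true;
}
#endif

If the VMware hypercall interface really does use SI, DI and BP in ways that
are not visible from the VMWARE_VMCALL() macro, then the patch as merged is
the right thing and this sketch does not apply; that is exactly the part I
could not tell from the code.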