On Thu, Oct 20, 2022, Maxim Levitsky wrote:
> This adds minimum amout of code to support tests that
> run SVM on more that one vCPU.

s/that/than

> 
> Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
> ---
>  lib/x86/svm_lib.c |   9 +
>  lib/x86/svm_lib.h |  10 +
>  x86/svm.c         |  37 ++-
>  x86/svm.h         |   5 +-
>  x86/svm_npt.c     |  44 ++--
>  x86/svm_tests.c   | 615 +++++++++++++++++++++++-----------------------
>  6 files changed, 362 insertions(+), 358 deletions(-)
> 
> diff --git a/lib/x86/svm_lib.c b/lib/x86/svm_lib.c
> index 2b067c65..1152c497 100644
> --- a/lib/x86/svm_lib.c
> +++ b/lib/x86/svm_lib.c
> @@ -157,3 +157,12 @@ void vmcb_ident(struct vmcb *vmcb)
>  		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
>  	}
>  }
> +
> +void svm_vcpu_init(struct svm_vcpu *vcpu)
> +{
> +	vcpu->vmcb = alloc_page();
> +	vmcb_ident(vcpu->vmcb);
> +	memset(&vcpu->regs, 0, sizeof(vcpu->regs));
> +	vcpu->stack = alloc_pages(4) + (PAGE_SIZE << 4);
> +	vcpu->vmcb->save.rsp = (ulong)(vcpu->stack);
> +}
> diff --git a/lib/x86/svm_lib.h b/lib/x86/svm_lib.h
> index 59db26de..c6957dba 100644
> --- a/lib/x86/svm_lib.h
> +++ b/lib/x86/svm_lib.h
> @@ -89,6 +89,16 @@ struct svm_extra_regs
>  	u64 r15;
>  };
>  
> +
> +struct svm_vcpu
> +{
> +	struct vmcb *vmcb;
> +	struct svm_extra_regs regs;
> +	void *stack;
> +};
> +
> +void svm_vcpu_init(struct svm_vcpu *vcpu);
> +
>  #define SWAP_GPRS(reg) \
>  	"xchg %%rcx, 0x08(%%" reg ")\n\t" \
>  	"xchg %%rdx, 0x10(%%" reg ")\n\t" \
> diff --git a/x86/svm.c b/x86/svm.c
> index 9484a6d1..7aa3ebd2 100644
> --- a/x86/svm.c
> +++ b/x86/svm.c
> @@ -16,7 +16,7 @@
>  #include "apic.h"
>  #include "svm_lib.h"
>  
> -struct vmcb *vmcb;
> +struct svm_vcpu vcpu0;

It's not strictly vCPU0, e.g. svm_init_intercept_test() deliberately runs on
vCPU2, presumably to avoid running on the BSP?

Since this is churning a lot of code anyways, why not clean this all up and
have run_svm_tests() dynamically allocate state instead of relying on global
data?
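
For illustration, a minimal sketch of what the dynamically-allocated
alternative could look like.  struct svm_vcpu, svm_vcpu_init() and
alloc_page()/free_page() are the pieces already in the patch/lib; the
svm_test_fn type and run_one_svm_test() helper are hypothetical names used
only for this sketch, not something the posted series defines:

	typedef void (*svm_test_fn)(struct svm_vcpu *vcpu);

	/*
	 * Hypothetical helper: the runner owns the per-vCPU state and hands
	 * it to the test, instead of the test reaching for a global "vcpu0".
	 */
	static void run_one_svm_test(svm_test_fn fn)
	{
		/* struct svm_vcpu comfortably fits in a single page. */
		struct svm_vcpu *vcpu = alloc_page();

		svm_vcpu_init(vcpu);
		fn(vcpu);
		free_page(vcpu);
	}

Note that free_page() here only returns the struct itself; a matching
svm_vcpu_free() would also need to release the vmcb and the stack that
svm_vcpu_init() allocates.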