On Fri, Oct 16, 2020 at 06:48:53PM -0700, Andy Lutomirski wrote: > On Fri, Oct 2, 2020 at 9:51 PM Jarkko Sakkinen > <jarkko.sakkinen@xxxxxxxxxxxxxxx> wrote: > > > > From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx> > > > > An SGX runtime must be aware of the exceptions, which happen inside an > > enclave. Introduce a vDSO call that wraps the EENTER/ERESUME cycle and returns > > the CPU exception back to the caller exactly when it happens. > > > > The kernel fixes up the exception information into RDI, RSI and RDX. The SGX call > > vDSO handler fills this information into the user provided buffer or > > alternatively triggers a user provided callback at the time of the exception. > > > > The calling convention supports providing the parameters in the standard RDI > > RSI, RDX, RCX, R8 and R9 registers, i.e. it is possible to declare the vDSO > > as a C prototype, but other than that there is no specific support for the > > SystemV ABI. Storing XSAVE etc. is all the responsibility of the enclave and > > the associated run-time. > > > > Suggested-by: Andy Lutomirski <luto@xxxxxxxxxxxxxx> > > Acked-by: Jethro Beekman <jethro@xxxxxxxxxxxx> > > Tested-by: Jethro Beekman <jethro@xxxxxxxxxxxx> > > Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx> > > Co-developed-by: Cedric Xing <cedric.xing@xxxxxxxxx> > > Signed-off-by: Cedric Xing <cedric.xing@xxxxxxxxx> > > Co-developed-by: Jarkko Sakkinen <jarkko.sakkinen@xxxxxxxxxxxxxxx> > > Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@xxxxxxxxxxxxxxx> > > > +SYM_FUNC_START(__vdso_sgx_enter_enclave) > > + /* Prolog */ > > + .cfi_startproc > > + push %rbp > > + .cfi_adjust_cfa_offset 8 > > + .cfi_rel_offset %rbp, 0 > > + mov %rsp, %rbp > > + .cfi_def_cfa_register %rbp > > + push %rbx > > + .cfi_rel_offset %rbx, -8 > > This *looks* right, but I'm not really an expert. I did not change this from earlier versions. 
> > + > > + mov %ecx, %eax > > +.Lenter_enclave: > > + /* EENTER <= leaf <= ERESUME */ > > + cmp $EENTER, %eax > > + jb .Linvalid_input > > + cmp $ERESUME, %eax > > + ja .Linvalid_input > > + > > + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rcx > > + > > + /* Validate that the reserved area contains only zeros. */ > > + push %rax > > + push %rbx > > This could use a .cfi_register_something_or_other for rbx. Sean pointed out that saving %rbx is not necessary here: https://lore.kernel.org/linux-sgx/20201006025703.GG15803@xxxxxxxxxxxxxxx/ > > + mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx > > +1: > > + mov (%rcx, %rbx), %rax > > + cmpq $0, %rax > > + jne .Linvalid_input > > + > > + add $8, %rbx > > + cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx > > + jne 1b > > + pop %rbx > > This should undo it. Given private feedback from Sean, I'm replacing this with: mov $SGX_ENCLAVE_RUN_RESERVED_START, %rbx 1: cmpq $0, (%rcx, %rbx) jne .Linvalid_input add $8, %rbx cmpq $SGX_ENCLAVE_RUN_RESERVED_END, %rbx jne 1b There was a bug in the error path: %rax was not popped. I did negative testing (testing both branches) for this but it went clean. I guess if I fix this, that will deal with all of your comments? > > + pop %rax > > + > > + /* Load TCS and AEP */ > > + mov SGX_ENCLAVE_RUN_TCS(%rcx), %rbx > > + lea .Lasync_exit_pointer(%rip), %rcx > > + > > + /* Single ENCLU serving as both EENTER and AEP (ERESUME) */ > > +.Lasync_exit_pointer: > > +.Lenclu_eenter_eresume: > > + enclu > > + > > + /* EEXIT jumps here unless the enclave is doing something fancy. */ > > + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx > > + > > + /* Set exit_reason. */ > > + movl $EEXIT, SGX_ENCLAVE_RUN_LEAF(%rbx) > > + > > + /* Invoke userspace's exit handler if one was provided. */ > > +.Lhandle_exit: > > + cmpq $0, SGX_ENCLAVE_RUN_USER_HANDLER(%rbx) > > + jne .Linvoke_userspace_handler > > + > > + /* Success, in the sense that ENCLU was attempted. 
*/ > > + xor %eax, %eax > > + > > +.Lout: > > + pop %rbx > > and this should undo the .cfi_register. > > > + leave > > + .cfi_def_cfa %rsp, 8 > > + ret > > + > > + /* The out-of-line code runs with the pre-leave stack frame. */ > > + .cfi_def_cfa %rbp, 16 > > + > > +.Linvalid_input: > > Here rbx and rax are pushed, and I guess pop rbx and leave fixes that > up, so okay. > > > + mov $(-EINVAL), %eax > > + jmp .Lout > > + > > +.Lhandle_exception: > > + mov SGX_ENCLAVE_OFFSET_OF_RUN(%rbp), %rbx > > + > > + /* Set the exception info. */ > > + mov %eax, (SGX_ENCLAVE_RUN_LEAF)(%rbx) > > + mov %di, (SGX_ENCLAVE_RUN_EXCEPTION_VECTOR)(%rbx) > > + mov %si, (SGX_ENCLAVE_RUN_EXCEPTION_ERROR_CODE)(%rbx) > > + mov %rdx, (SGX_ENCLAVE_RUN_EXCEPTION_ADDR)(%rbx) > > + jmp .Lhandle_exit > > + > > +.Linvoke_userspace_handler: > > + /* Pass the untrusted RSP (at exit) to the callback via %rcx. */ > > + mov %rsp, %rcx > > + > > + /* Save struct sgx_enclave_exception, as %rbx is about to be clobbered. */ > > + mov %rbx, %rax > > + > > + /* Save the untrusted RSP offset in %rbx (non-volatile register). */ > > + mov %rsp, %rbx > > + and $0xf, %rbx > > + > > + /* > > + * Align stack per x86_64 ABI. Note, %rsp needs to be 16-byte aligned > > + * _after_ pushing the parameters on the stack, hence the bonus push. > > + */ > > + and $-0x10, %rsp > > + push %rax > > + > > + /* Push struct sgx_enclave_exception as a param to the callback. */ > > + push %rax > > + > > + /* Clear RFLAGS.DF per x86_64 ABI */ > > + cld > > + > > + /* > > + * Load the callback pointer to %rax and lfence for LVI (load value > > + * injection) protection before making the call. > > + */ > > + mov SGX_ENCLAVE_RUN_USER_HANDLER(%rax), %rax > > + lfence > > + call *%rax > > + > > + /* Undo the post-exit %rsp adjustment. 
*/ > > + lea 0x10(%rsp, %rbx), %rsp > > + > > + /* > > + * If the return from callback is zero or negative, return immediately, > > + * else re-execute ENCLU with the positive return value interpreted as > > + * the requested ENCLU leaf. > > + */ > > + cmp $0, %eax > > + jle .Lout > > + jmp .Lenter_enclave > > + > > + .cfi_endproc > > + > > +_ASM_VDSO_EXTABLE_HANDLE(.Lenclu_eenter_eresume, .Lhandle_exception) /Jarkko