[RFC PATCH 4/4] x86/vdso: x86/sgx: Allow the user to exit the vDSO loop on interrupts

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Allow userspace to exit the vDSO on interrupts that are acknowledged
while the enclave is active.  This allows the user's runtime to switch
contexts at opportune times without additional overhead, e.g. when using
an M:N threading model (where M user threads run N TCSs, with N > M).

Suggested-by: Jethro Beekman <jethro@xxxxxxxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/entry/vdso/vsgx_enter_enclave.S | 27 ++++++++++++++++++++----
 arch/x86/include/uapi/asm/sgx.h          |  3 +++
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/arch/x86/entry/vdso/vsgx_enter_enclave.S b/arch/x86/entry/vdso/vsgx_enter_enclave.S
index b09e87dbe9334..33428c0f94b0d 100644
--- a/arch/x86/entry/vdso/vsgx_enter_enclave.S
+++ b/arch/x86/entry/vdso/vsgx_enter_enclave.S
@@ -21,6 +21,9 @@
 
 #define SGX_SYNCHRONOUS_EXIT	0
 #define SGX_EXCEPTION_EXIT	1
+#define SGX_INTERRUPT_EXIT	2
+
+#define SGX_EXIT_ON_INTERRUPTS	1
 
 /* Offsets into sgx_enter_enclave.exception. */
 #define EX_TRAPNR	0*8
@@ -51,12 +54,17 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 
 	mov	RUN_OFFSET(%rbp), %rcx
 
-	/* No flags are currently defined/supported. */
-	cmpq	$0, FLAGS_OFFSET(%rcx)
-	jne	.Linvalid_input
-
 	/* Load TCS and AEP */
 	mov	TCS_OFFSET(%rcx), %rbx
+
+	/* Use the alternate AEP if the user wants to exit on interrupts. */
+	mov	FLAGS_OFFSET(%rcx), %rcx
+	cmpq	$SGX_EXIT_ON_INTERRUPTS, %rcx
+	je	.Lload_interrupts_aep
+
+	/* All other flags are reserved. */
+	test	%rcx, %rcx
+	jne	.Linvalid_input
 	lea	.Lasync_exit_pointer(%rip), %rcx
 
 	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
@@ -93,6 +101,17 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 	mov	$(-EINVAL), %eax
 	jmp	.Lout
 
+.Lload_interrupts_aep:
+	lea	.Lhandle_interrupt(%rip), %rcx
+	jmp	.Lenclu_eenter_eresume
+
+.Lhandle_interrupt:
+	mov	RUN_OFFSET(%rbp), %rbx
+
+	/* Set the exit_reason; interrupts carry no exception info. */
+	movl	$SGX_INTERRUPT_EXIT, EXIT_REASON_OFFSET(%rbx)
+	jmp	.Lhandle_exit
+
 .Lhandle_exception:
 	mov	RUN_OFFSET(%rbp), %rbx
 
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index 80a8b7a949a23..beeabfad6eb81 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -76,6 +76,7 @@ struct sgx_enclave_set_attribute {
 
 #define SGX_SYNCHRONOUS_EXIT	0
 #define SGX_EXCEPTION_EXIT	1
+#define SGX_INTERRUPT_EXIT	2
 
 struct sgx_enclave_run;
 
@@ -116,6 +117,8 @@ struct sgx_enclave_exception {
 	__u64 address;
 };
 
+#define SGX_EXIT_ON_INTERRUPTS	(1ULL << 0)
+
 /**
  * struct sgx_enclave_run - Control structure for __vdso_sgx_enter_enclave()
  *
-- 
2.28.0




[Index of Archives]     [AMD Graphics]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux