Do enclave state checks only in sgx_reclaimer_write(). Checking the
enclave state is not part of the sgx_encl_ewb() flow. The check is done
differently for SECS and for addressable pages.

Cc: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Cc: Shay Katz-zamir <shay.katz-zamir@xxxxxxxxx>
Cc: Serge Ayoun <serge.ayoun@xxxxxxxxx>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@xxxxxxxxxxxxxxx>
---
 arch/x86/kernel/cpu/sgx/reclaim.c | 69 +++++++++++++++----------------
 1 file changed, 34 insertions(+), 35 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
index e758a06919e4..a3e36f959c74 100644
--- a/arch/x86/kernel/cpu/sgx/reclaim.c
+++ b/arch/x86/kernel/cpu/sgx/reclaim.c
@@ -308,47 +308,45 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 
 	encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;
 
-	if (!(atomic_read(&encl->flags) & SGX_ENCL_DEAD)) {
-		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
-					   list);
-		va_offset = sgx_alloc_va_slot(va_page);
-		if (sgx_va_page_full(va_page))
-			list_move_tail(&va_page->list, &encl->va_pages);
+	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+				   list);
+	va_offset = sgx_alloc_va_slot(va_page);
+	if (sgx_va_page_full(va_page))
+		list_move_tail(&va_page->list, &encl->va_pages);
+
+	ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
+			     page_index);
+	if (ret == SGX_NOT_TRACKED) {
+		ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
+		if (ret) {
+			if (encls_failed(ret) ||
+			    encls_returned_code(ret))
+				ENCLS_WARN(ret, "ETRACK");
+		}
 
 		ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
 				     page_index);
 		if (ret == SGX_NOT_TRACKED) {
-			ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
-			if (ret) {
-				if (encls_failed(ret) ||
-				    encls_returned_code(ret))
-					ENCLS_WARN(ret, "ETRACK");
-			}
-
-			ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
-					     page_index);
-			if (ret == SGX_NOT_TRACKED) {
-				/*
-				 * Slow path, send IPIs to kick cpus out of the
-				 * enclave. Note, it's imperative that the cpu
-				 * mask is generated *after* ETRACK, else we'll
-				 * miss cpus that entered the enclave between
-				 * generating the mask and incrementing epoch.
-				 */
-				on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
-						 sgx_ipi_cb, NULL, 1);
-				ret = __sgx_encl_ewb(encl, epc_page, va_page,
-						     va_offset, page_index);
-			}
+			/*
+			 * Slow path, send IPIs to kick cpus out of the
+			 * enclave. Note, it's imperative that the cpu
+			 * mask is generated *after* ETRACK, else we'll
+			 * miss cpus that entered the enclave between
+			 * generating the mask and incrementing epoch.
+			 */
+			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+					 sgx_ipi_cb, NULL, 1);
+			ret = __sgx_encl_ewb(encl, epc_page, va_page,
+					     va_offset, page_index);
 		}
+	}
 
-		if (ret)
-			if (encls_failed(ret) || encls_returned_code(ret))
-				ENCLS_WARN(ret, "EWB");
+	if (ret)
+		if (encls_failed(ret) || encls_returned_code(ret))
+			ENCLS_WARN(ret, "EWB");
 
-		encl_page->desc |= va_offset;
-		encl_page->va_page = va_page;
-	}
 }
 
 static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
@@ -359,10 +357,11 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
 
 	mutex_lock(&encl->lock);
 
-	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
 		ret = __eremove(sgx_epc_addr(epc_page));
 		WARN(ret, "EREMOVE returned %d\n", ret);
+	} else {
+		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	}
 
 	encl_page->epc_page = NULL;
-- 
2.20.1
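
[Reviewer note, not part of the patch: below is a minimal sketch of the
reclaim write path as it reads after this change. The helpers used
(sgx_encl_ewb(), __eremove(), sgx_epc_addr(), SGX_ENCL_PAGE_INDEX()) are
taken from the diff above; the epc_page->owner dereference and the
mutex_unlock() tail are assumptions reconstructed from context and may
not match the file exactly.]

/*
 * Sketch of sgx_reclaimer_write() after this patch: the enclave state
 * is checked exactly once here, and sgx_encl_ewb() is reached only for
 * live enclaves.
 */
static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	int ret;

	mutex_lock(&encl->lock);

	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
		/* Dead enclave: the contents no longer matter, just EREMOVE. */
		ret = __eremove(sgx_epc_addr(epc_page));
		WARN(ret, "EREMOVE returned %d\n", ret);
	} else {
		/* Live enclave: swap the page out with EWB. */
		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
	}

	encl_page->epc_page = NULL;

	mutex_unlock(&encl->lock);
}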