Take the first step integrating sgx_ewb() i.e. just fill struct sgx_ewb_context inside __sgx_encl_ewb() and call sgx_ewb(). The 2nd step is to move resource binding up to the part where the page is picked up for the reclaiming process. Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@xxxxxxxxxxxxxxx> --- arch/x86/kernel/cpu/sgx/reclaim.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c index e2b978664f9d..4542d47f48a5 100644 --- a/arch/x86/kernel/cpu/sgx/reclaim.c +++ b/arch/x86/kernel/cpu/sgx/reclaim.c @@ -225,8 +225,7 @@ static int __sgx_encl_ewb(struct sgx_encl *encl, struct sgx_epc_page *epc_page, unsigned int pt) { struct sgx_encl_page *encl_page = epc_page->owner; - struct sgx_pageinfo pginfo; - unsigned long pcmd_offset; + struct sgx_ewb_context ctx; struct page *backing; pgoff_t page_index; pgoff_t pcmd_index; @@ -243,7 +242,6 @@ static int __sgx_encl_ewb(struct sgx_encl *encl, struct sgx_epc_page *epc_page, page_index = SGX_ENCL_PAGE_INDEX(encl_page); pcmd_index = sgx_pcmd_index(encl, page_index); - pcmd_offset = sgx_pcmd_offset(page_index); backing = sgx_encl_get_backing_page(encl, page_index); if (IS_ERR(backing)) { @@ -257,14 +255,14 @@ static int __sgx_encl_ewb(struct sgx_encl *encl, struct sgx_epc_page *epc_page, goto err_pcmd; } - pginfo.addr = 0; - pginfo.contents = (unsigned long)kmap_atomic(backing); - pginfo.metadata = (unsigned long)kmap_atomic(pcmd) + pcmd_offset; - pginfo.secs = 0; - ret = __ewb(&pginfo, sgx_epc_addr(epc_page), - sgx_epc_addr(va_page->epc_page) + va_offset); - kunmap_atomic((void *)(unsigned long)(pginfo.metadata - pcmd_offset)); - kunmap_atomic((void *)(unsigned long)pginfo.contents); + ctx.page = epc_page; + ctx.contents = backing; + ctx.pcmd = pcmd; + ctx.pcmd_offset = sgx_pcmd_offset(page_index); + ctx.version_array = va_page->epc_page; + ctx.version_offset = va_offset; + + ret = sgx_ewb(&ctx); if (!ret) { 
set_page_dirty(pcmd); -- 2.20.1