Move the post-reclaim half of sgx_free_page() to a standalone helper so
that it can be used in flows where the page is known to be
non-reclaimable.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
 arch/x86/kernel/cpu/sgx/main.c | 44 ++++++++++++++++++++++++++--------
 arch/x86/kernel/cpu/sgx/sgx.h  |  1 +
 2 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 718fd5590608..083d9a589882 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -103,6 +103,39 @@ struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
 	return entry;
 }
 
+
+/**
+ * __sgx_free_page() - Free an EPC page
+ * @page:	pointer to a previously allocated EPC page
+ *
+ * EREMOVE an EPC page and insert it back to the list of free pages. The page
+ * must not be reclaimable.
+ */
+void __sgx_free_page(struct sgx_epc_page *page)
+{
+	struct sgx_epc_section *section;
+	int ret;
+
+	/*
+	 * Don't take sgx_active_page_list_lock when asserting the page isn't
+	 * reclaimable, missing a WARN in the very rare case is preferable to
+	 * unnecessarily taking a global lock in the common case.
+	 */
+	WARN_ON_ONCE(page->desc & SGX_EPC_PAGE_RECLAIMABLE);
+
+	ret = __eremove(sgx_epc_addr(page));
+	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
+		return;
+
+	section = sgx_epc_section(page);
+
+	spin_lock(&section->lock);
+	list_add_tail(&page->list, &section->page_list);
+	sgx_nr_free_pages++;
+	spin_unlock(&section->lock);
+
+}
+
 /**
  * sgx_free_page() - Free an EPC page
  * @page:	pointer a previously allocated EPC page
@@ -116,9 +149,6 @@ struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
  */
 int sgx_free_page(struct sgx_epc_page *page)
 {
-	struct sgx_epc_section *section = sgx_epc_section(page);
-	int ret;
-
 	/*
 	 * Remove the page from the active list if necessary. If the page
 	 * is actively being reclaimed, i.e. RECLAIMABLE is set but the
@@ -136,13 +166,7 @@ int sgx_free_page(struct sgx_epc_page *page)
 	}
 	spin_unlock(&sgx_active_page_list_lock);
 
-	ret = __eremove(sgx_epc_addr(page));
-	WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret);
-
-	spin_lock(&section->lock);
-	list_add_tail(&page->list, &section->page_list);
-	sgx_nr_free_pages++;
-	spin_unlock(&section->lock);
+	__sgx_free_page(page);
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 160a3c996ef6..87e375e8c25e 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -85,6 +85,7 @@ void sgx_reclaim_pages(void);
 
 struct sgx_epc_page *sgx_try_alloc_page(void);
 struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim);
+void __sgx_free_page(struct sgx_epc_page *page);
 int sgx_free_page(struct sgx_epc_page *page);
 
 #endif /* _X86_SGX_H */
-- 
2.22.0
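
For anyone reading this outside the larger series, a rough sketch of the
kind of caller the new helper is aimed at is below. The
sgx_example_setup_page() and sgx_do_setup() names and the ERR_PTR-style
return from sgx_alloc_page() are assumptions for illustration only and
are not part of this patch.

/*
 * Hypothetical flow: the page is allocated with reclaim disabled and its
 * SGX_EPC_PAGE_RECLAIMABLE bit is never set, so on failure it can be
 * released with __sgx_free_page() instead of paying for the active-list
 * handling in sgx_free_page().
 */
static int sgx_example_setup_page(void *owner)
{
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_page(owner, false);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);

	ret = sgx_do_setup(epc_page);	/* hypothetical consumer */
	if (ret) {
		/* Known non-reclaimable, so the cheaper helper suffices. */
		__sgx_free_page(epc_page);
		return ret;
	}

	return 0;
}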