From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>

Add wrappers for direct references to the global LRU list in the
reclaimer functions. To support multiple LRU lists (one per EPC cgroup)
later, only changes inside these wrappers will be needed.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Co-developed-by: Kristen Carlson Accardi <kristen@xxxxxxxxxxxxxxx>
Signed-off-by: Kristen Carlson Accardi <kristen@xxxxxxxxxxxxxxx>
Co-developed-by: Haitao Huang <haitao.huang@xxxxxxxxxxxxxxx>
Signed-off-by: Haitao Huang <haitao.huang@xxxxxxxxxxxxxxx>
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
---
V5:
- Revise commit message to make the purpose more clear.

V4:
- Re-organized this patch to include all changes related to
  encapsulation of the global LRU
- Moved this patch to precede the EPC cgroup patch
---
 arch/x86/kernel/cpu/sgx/main.c | 41 +++++++++++++++++++++++-----------
 1 file changed, 28 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index b34ad3574c81..d37ef0dd865f 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -35,6 +35,16 @@ static DEFINE_XARRAY(sgx_epc_address_space);
  */
 static struct sgx_epc_lru_lists sgx_global_lru;
 
+static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
+{
+	return &sgx_global_lru;
+}
+
+static inline bool sgx_can_reclaim(void)
+{
+	return !list_empty(&sgx_global_lru.reclaimable);
+}
+
 static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
 
 /* Nodes with one or more EPC sections. */
@@ -340,6 +350,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 	struct sgx_backing backing[SGX_NR_TO_SCAN_MAX];
 	struct sgx_epc_page *epc_page, *tmp;
 	struct sgx_encl_page *encl_page;
+	struct sgx_epc_lru_lists *lru;
 	pgoff_t page_index;
 	LIST_HEAD(iso);
 	size_t ret, i;
@@ -372,10 +383,11 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 		continue;
 
 skip:
-		spin_lock(&sgx_global_lru.lock);
+		lru = sgx_lru_lists(epc_page);
+		spin_lock(&lru->lock);
 		sgx_epc_page_set_state(epc_page, SGX_EPC_PAGE_RECLAIMABLE);
-		list_move_tail(&epc_page->list, &sgx_global_lru.reclaimable);
-		spin_unlock(&sgx_global_lru.lock);
+		list_move_tail(&epc_page->list, &lru->reclaimable);
+		spin_unlock(&lru->lock);
 
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
 	}
@@ -400,7 +412,7 @@ size_t sgx_reclaim_epc_pages(size_t nr_to_scan, bool ignore_age)
 static bool sgx_should_reclaim(unsigned long watermark)
 {
 	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
-	       !list_empty(&sgx_global_lru.reclaimable);
+	       sgx_can_reclaim();
 }
 
 /*
@@ -530,14 +542,16 @@ struct sgx_epc_page *__sgx_alloc_epc_page(void)
  */
 void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	WARN_ON_ONCE(sgx_epc_page_reclaimable(page->flags));
 	page->flags |= flags;
 	if (sgx_epc_page_reclaimable(flags))
-		list_add_tail(&page->list, &sgx_global_lru.reclaimable);
+		list_add_tail(&page->list, &lru->reclaimable);
 	else
-		list_add_tail(&page->list, &sgx_global_lru.unreclaimable);
-	spin_unlock(&sgx_global_lru.lock);
+		list_add_tail(&page->list, &lru->unreclaimable);
+	spin_unlock(&lru->lock);
 }
 
 /**
@@ -552,15 +566,16 @@ void sgx_record_epc_page(struct sgx_epc_page *page, unsigned long flags)
  */
 int sgx_drop_epc_page(struct sgx_epc_page *page)
 {
-	spin_lock(&sgx_global_lru.lock);
+	struct sgx_epc_lru_lists *lru = sgx_lru_lists(page);
+
+	spin_lock(&lru->lock);
 	if (sgx_epc_page_reclaim_in_progress(page->flags)) {
-		spin_unlock(&sgx_global_lru.lock);
+		spin_unlock(&lru->lock);
 		return -EBUSY;
 	}
-
 	list_del(&page->list);
 	sgx_epc_page_reset_state(page);
-	spin_unlock(&sgx_global_lru.lock);
+	spin_unlock(&lru->lock);
 
 	return 0;
 }
@@ -593,7 +608,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 			break;
 		}
 
-		if (list_empty(&sgx_global_lru.reclaimable))
+		if (!sgx_can_reclaim())
 			return ERR_PTR(-ENOMEM);
 
 		if (!reclaim) {
-- 
2.25.1
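
For illustration only, not part of this patch: once the later EPC cgroup
patch adds per-cgroup LRUs, sgx_lru_lists() is the single place that has
to pick a different list. A rough sketch of what that could look like,
assuming a hypothetical struct sgx_epc_cgroup that embeds a struct
sgx_epc_lru_lists as "lru", a hypothetical "epc_cg" back pointer in
struct sgx_epc_page, and a placeholder CONFIG_CGROUP_SGX_EPC option
(none of these exist in this patch):

/*
 * Hypothetical sketch: return the owning cgroup's LRU when the page is
 * charged to one, otherwise fall back to the global LRU. The names used
 * here are placeholders for illustration, not definitions from this
 * series.
 */
static inline struct sgx_epc_lru_lists *sgx_lru_lists(struct sgx_epc_page *epc_page)
{
#ifdef CONFIG_CGROUP_SGX_EPC
	if (epc_page->epc_cg)
		return &epc_page->epc_cg->lru;
#endif
	return &sgx_global_lru;
}

The call sites touched by this patch (sgx_record_epc_page(),
sgx_drop_epc_page() and sgx_reclaim_epc_pages()) would then operate on
the per-cgroup list without further changes, which is the point of
routing every access through the wrapper.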