From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>

Modify sgx_reclaim_pages() to take a parameter that specifies the
number of pages to scan for reclaiming. Specify a max value of 32,
but scan 16 in the usual case. This allows the number of pages
sgx_reclaim_pages() scans to be specified by the caller, and adjusted
in future patches.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Kristen Carlson Accardi <kristen@xxxxxxxxxxxxxxx>
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kernel/cpu/sgx/main.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 3c0d33b72896..0010ed1b2e98 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -18,6 +18,8 @@
 #include "encl.h"
 #include "encls.h"
 
+#define SGX_MAX_NR_TO_RECLAIM 32
+
 struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
 static int sgx_nr_epc_sections;
 static struct task_struct *ksgxd_tsk;
@@ -273,7 +275,10 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
 	mutex_unlock(&encl->lock);
 }
 
-/*
+/**
+ * sgx_reclaim_pages() - Reclaim EPC pages from the consumers
+ * @nr_to_scan: Number of EPC pages to scan for reclaim
+ *
  * Take a fixed number of pages from the head of the active page pool and
  * reclaim them to the enclave's private shmem files. Skip the pages, which have
  * been accessed since the last scan. Move those pages to the tail of active
@@ -286,9 +291,9 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
 * problematic as it would increase the lock contention too much, which would
 * halt forward progress.
 */
-static void sgx_reclaim_pages(void)
+static void sgx_reclaim_pages(int nr_to_scan)
 {
-	struct sgx_backing backing[SGX_NR_TO_SCAN];
+	struct sgx_backing backing[SGX_MAX_NR_TO_RECLAIM];
 	struct sgx_encl_page *encl_page;
 	struct sgx_epc_page *epc_page, *tmp;
 	pgoff_t page_index;
@@ -297,7 +302,7 @@ static void sgx_reclaim_pages(void)
 	int i;
 
 	spin_lock(&sgx_global_lru.lock);
-	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
+	for (i = 0; i < nr_to_scan; i++) {
 		if (list_empty(&sgx_global_lru.reclaimable))
 			break;
 
@@ -327,7 +332,7 @@ static void sgx_reclaim_pages(void)
 	list_for_each_entry_safe(epc_page, tmp, &iso, list) {
 		encl_page = epc_page->owner;
 
-		if (!sgx_reclaimer_age(epc_page))
+		if (i == SGX_MAX_NR_TO_RECLAIM || !sgx_reclaimer_age(epc_page))
 			goto skip;
 
 		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
@@ -384,7 +389,7 @@ static bool sgx_should_reclaim(unsigned long watermark)
 void sgx_reclaim_direct(void)
 {
 	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
-		sgx_reclaim_pages();
+		sgx_reclaim_pages(SGX_NR_TO_SCAN);
 }
 
 static int ksgxd(void *p)
@@ -410,7 +415,7 @@ static int ksgxd(void *p)
 				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));
 
 		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
-			sgx_reclaim_pages();
+			sgx_reclaim_pages(SGX_NR_TO_SCAN);
 	}
 
 	return 0;
@@ -581,7 +586,7 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
 			break;
 		}
 
-		sgx_reclaim_pages();
+		sgx_reclaim_pages(SGX_NR_TO_SCAN);
 	}
 
 	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
-- 
2.37.3
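
[Not part of the patch: a minimal, self-contained C sketch of the calling
convention this change sets up. Every name below (reclaim_pages,
MAX_NR_TO_RECLAIM, NR_TO_SCAN, nr_available) is hypothetical and chosen
only for illustration; what it mirrors is the shape of the patch: callers
pass a scan count, the usual batch is 16, and the on-stack scratch array
is sized for the 32-page maximum, so the requested count must never
exceed it.]

/*
 * Toy illustration only; every name here is hypothetical and
 * deliberately not the kernel's.  Compile with: cc -o demo demo.c
 */
#include <stdio.h>

#define MAX_NR_TO_RECLAIM 32	/* mirrors SGX_MAX_NR_TO_RECLAIM: scratch-array bound */
#define NR_TO_SCAN        16	/* mirrors SGX_NR_TO_SCAN: the usual per-call batch */

/* Scan up to nr_to_scan entries from a pool of nr_available, two-phase style. */
static int reclaim_pages(int nr_to_scan, int nr_available)
{
	int isolated[MAX_NR_TO_RECLAIM];	/* sized for the max, like backing[] */
	int nr_isolated = 0;
	int i;

	if (nr_to_scan > MAX_NR_TO_RECLAIM)
		nr_to_scan = MAX_NR_TO_RECLAIM;	/* never overrun the scratch array */

	/* Phase 1: pull up to nr_to_scan entries off the (pretend) LRU. */
	for (i = 0; i < nr_to_scan && i < nr_available; i++)
		isolated[nr_isolated++] = i;

	/* Phase 2: "write back" whatever was isolated. */
	for (i = 0; i < nr_isolated; i++)
		printf("reclaiming page %d\n", isolated[i]);

	return nr_isolated;
}

int main(void)
{
	/* Usual case: a batch of 16; a later caller could ask for up to 32. */
	printf("reclaimed %d pages\n", reclaim_pages(NR_TO_SCAN, 100));
	printf("reclaimed %d pages\n", reclaim_pages(MAX_NR_TO_RECLAIM, 10));
	return 0;
}

The only point of the sketch is the sizing relationship: the caller chooses
the batch size per call, while the stack footprint stays bounded by the
compile-time maximum.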