damon_lru_sort_new_hot_scheme() and damon_lru_sort_new_cold_scheme() share
most of their logic, so combine them into a single helper,
damon_lru_sort_new_scheme(), which takes the threshold and the DAMOS action
and applies only the hot/cold-specific differences.

Signed-off-by: Xin Hao <xhao@xxxxxxxxxxxxxxxxx>
---
 mm/damon/lru_sort.c | 82 +++++++++++++++++----------------------------
 1 file changed, 30 insertions(+), 52 deletions(-)

diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 8415e18fcf0e..62063ed43224 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -257,15 +257,15 @@ module_param(nr_cold_quota_exceeds, ulong, 0400);
 static struct damon_ctx *ctx;
 static struct damon_target *target;
 
-/* Create a DAMON-based operation scheme for hot memory regions */
-static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
+static inline struct damos *damon_lru_sort_new_scheme(unsigned int thres,
+		enum damos_action action)
 {
 	struct damos_access_pattern pattern = {
 		/* Find regions having PAGE_SIZE or larger size */
 		.min_sz_region = PAGE_SIZE,
 		.max_sz_region = ULONG_MAX,
 		/* and accessed for more than the threshold */
-		.min_nr_accesses = hot_thres,
+		.min_nr_accesses = 0,
 		.max_nr_accesses = UINT_MAX,
 		/* no matter its age */
 		.min_age_region = 0,
@@ -292,60 +292,38 @@ static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
 		.weight_age = 0,
 	};
 
-	return damon_new_scheme(
-			&pattern,
-			/* prioritize those on LRU lists, as soon as found */
-			DAMOS_LRU_PRIO,
-			/* under the quota. */
-			&quota,
-			/* (De)activate this according to the watermarks. */
-			&wmarks);
+	switch (action) {
+	case DAMOS_LRU_PRIO:
+		pattern.min_nr_accesses = thres;
+		break;
+	case DAMOS_LRU_DEPRIO:
+		pattern.min_age_region = thres;
+		quota.weight_nr_accesses = 0;
+		quota.weight_age = 1;
+		break;
+	default:
+		return NULL;
+	}
+
+	return damon_new_scheme(&pattern,
+			/* (de)prioritize on LRU-lists, as soon as found */
+			action,
+			/* under the quota. */
+			&quota,
+			/* (De)activate this according to the watermarks. */
+			&wmarks);
+}
+
+/* Create a DAMON-based operation scheme for hot memory regions */
+static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
+{
+	return damon_lru_sort_new_scheme(hot_thres, DAMOS_LRU_PRIO);
 }
 
 /* Create a DAMON-based operation scheme for cold memory regions */
 static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
 {
-	struct damos_access_pattern pattern = {
-		/* Find regions having PAGE_SIZE or larger size */
-		.min_sz_region = PAGE_SIZE,
-		.max_sz_region = ULONG_MAX,
-		/* and not accessed at all */
-		.min_nr_accesses = 0,
-		.max_nr_accesses = 0,
-		/* for min_age or more micro-seconds */
-		.min_age_region = cold_thres,
-		.max_age_region = UINT_MAX,
-	};
-	struct damos_watermarks wmarks = {
-		.metric = DAMOS_WMARK_FREE_MEM_RATE,
-		.interval = wmarks_interval,
-		.high = wmarks_high,
-		.mid = wmarks_mid,
-		.low = wmarks_low,
-	};
-	struct damos_quota quota = {
-		/*
-		 * Do not try LRU-lists sorting of cold pages for more than
-		 * half of quota_ms milliseconds within
-		 * quota_reset_interval_ms.
-		 */
-		.ms = quota_ms / 2,
-		.sz = 0,
-		.reset_interval = quota_reset_interval_ms,
-		/* Within the quota, mark colder regions not accessed first. */
-		.weight_sz = 0,
-		.weight_nr_accesses = 0,
-		.weight_age = 1,
-	};
-
-	return damon_new_scheme(
-			&pattern,
-			/* mark those as not accessed, as soon as found */
-			DAMOS_LRU_DEPRIO,
-			/* under the quota. */
-			&quota,
-			/* (De)activate this according to the watermarks. */
-			&wmarks);
+	return damon_lru_sort_new_scheme(cold_thres, DAMOS_LRU_DEPRIO);
 }
 
 static int damon_lru_sort_apply_parameters(void)
-- 
2.31.0
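
For reference, a minimal sketch of the call pattern after this change. The
example_build_schemes() helper below is hypothetical and not part of the
patch; it only illustrates that both thin wrappers now funnel into
damon_lru_sort_new_scheme() and can still return NULL, so callers keep the
usual NULL checks.

/*
 * Illustrative sketch only (hypothetical caller inside lru_sort.c):
 * build the hot and cold schemes through the unified helper and clean
 * up on failure.  damon_destroy_scheme() is the existing DAMON core API
 * for freeing a scheme.
 */
static int example_build_schemes(unsigned int hot_thres,
				 unsigned int cold_thres)
{
	struct damos *hot_scheme, *cold_scheme;

	/* DAMOS_LRU_PRIO path: pattern.min_nr_accesses = hot_thres */
	hot_scheme = damon_lru_sort_new_hot_scheme(hot_thres);
	if (!hot_scheme)
		return -ENOMEM;

	/* DAMOS_LRU_DEPRIO path: pattern.min_age_region = cold_thres */
	cold_scheme = damon_lru_sort_new_cold_scheme(cold_thres);
	if (!cold_scheme) {
		damon_destroy_scheme(hot_scheme);
		return -ENOMEM;
	}

	/* ...install hot_scheme and cold_scheme into the DAMON context... */
	return 0;
}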