Refactor compact_node() to handle both proactive and synchronous memory
compaction, which cleans up the code a bit.

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
 mm/compaction.c | 66 +++++++++++++++++--------------------------------
 1 file changed, 23 insertions(+), 43 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index e63a4ee7e029..f2d886a88ee1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2884,26 +2884,17 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
 	return rc;
 }
 
-/*
- * Compact all zones within a node till each zone's fragmentation score
- * reaches within proactive compaction thresholds (as determined by the
- * proactiveness tunable).
- *
- * It is possible that the function returns before reaching score targets
- * due to various back-off conditions, such as, contention on per-node or
- * per-zone locks.
- */
-static void proactive_compact_node(pg_data_t *pgdat)
+static void compact_node(pg_data_t *pgdat, bool proactive)
 {
 	int zoneid;
 	struct zone *zone;
 	struct compact_control cc = {
 		.order = -1,
-		.mode = MIGRATE_SYNC_LIGHT,
+		.mode = proactive ? MIGRATE_SYNC_LIGHT : MIGRATE_SYNC,
 		.ignore_skip_hint = true,
 		.whole_zone = true,
 		.gfp_mask = GFP_KERNEL,
-		.proactive_compaction = true,
+		.proactive_compaction = proactive,
 	};
 
 	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
@@ -2915,41 +2906,30 @@ static void proactive_compact_node(pg_data_t *pgdat)
 
 		compact_zone(&cc, NULL);
 
-		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
-				     cc.total_migrate_scanned);
-		count_compact_events(KCOMPACTD_FREE_SCANNED,
-				     cc.total_free_scanned);
+		if (proactive) {
+			count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
+					     cc.total_migrate_scanned);
+			count_compact_events(KCOMPACTD_FREE_SCANNED,
+					     cc.total_free_scanned);
+		}
 	}
 }
 
-/* Compact all zones within a node */
-static void compact_node(int nid)
+/*
+ * Compact all zones within a node till each zone's fragmentation score
+ * reaches within proactive compaction thresholds (as determined by the
+ * proactiveness tunable).
+ *
+ * It is possible that the function returns before reaching score targets
+ * due to various back-off conditions, such as, contention on per-node or
+ * per-zone locks.
+ */
+static void proactive_compact_node(pg_data_t *pgdat)
 {
-	pg_data_t *pgdat = NODE_DATA(nid);
-	int zoneid;
-	struct zone *zone;
-	struct compact_control cc = {
-		.order = -1,
-		.mode = MIGRATE_SYNC,
-		.ignore_skip_hint = true,
-		.whole_zone = true,
-		.gfp_mask = GFP_KERNEL,
-	};
-
-
-	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
-
-		zone = &pgdat->node_zones[zoneid];
-		if (!populated_zone(zone))
-			continue;
-
-		cc.zone = zone;
-
-		compact_zone(&cc, NULL);
-	}
+	compact_node(pgdat, true);
 }
 
-/* Compact all nodes in the system */
+/* Compact all zones of all nodes in the system */
 static void compact_nodes(void)
 {
 	int nid;
@@ -2958,7 +2938,7 @@ static void compact_nodes(void)
 	lru_add_drain_all();
 
 	for_each_online_node(nid)
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 }
 
 static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
@@ -3020,7 +3000,7 @@ static ssize_t compact_store(struct device *dev,
 
 		/* Flush pending updates to the LRU lists */
 		lru_add_drain_all();
-		compact_node(nid);
+		compact_node(NODE_DATA(nid), false);
 	}
 
 	return count;
-- 
2.27.0
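
For reference, a minimal sketch of the call paths into the unified helper
after this change. The sysctl/sysfs paths are visible in this diff; the
kcompactd path is assumed from the surrounding mm/compaction.c and is not
shown here:

	/* kcompactd, proactive background pass */
	proactive_compact_node(pgdat)
	  -> compact_node(pgdat, true)		/* MIGRATE_SYNC_LIGHT, counts
						   KCOMPACTD_MIGRATE_SCANNED and
						   KCOMPACTD_FREE_SCANNED */

	/* echo 1 > /proc/sys/vm/compact_memory, all nodes */
	compact_nodes()
	  -> compact_node(NODE_DATA(nid), false)	/* MIGRATE_SYNC */

	/* echo 1 > /sys/devices/system/node/nodeN/compact, one node */
	compact_store()
	  -> compact_node(NODE_DATA(nid), false)	/* MIGRATE_SYNC */

Only the proactive path counts the kcompactd scan events, matching the
behaviour of the two functions before they were merged.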