Re: [PATCH] mm/vmscan: fix incorrect return type for cgroup_reclaim()

qiwuchen55@xxxxxxxxx writes:
From: chenqiwu <chenqiwu@xxxxxxxxxx>

The return type of cgroup_reclaim() is bool, but the correct type
should be struct mem_cgroup pointer. As a result, cgroup_reclaim()
can be used to wrap sc->target_mem_cgroup in vmscan code.

The code changes look okay to me, but the changelog and patch subject could do with some improvement. For example, the type isn't "incorrect" before and "correct" now, per se; the old code just coerces a struct mem_cgroup * to bool.
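
(For illustration only, here is a standalone userspace sketch of that coercion, using stand-in types rather than the real kernel ones: in C, any pointer converts implicitly to bool as "is it non-NULL", so the bool-returning version compiles fine, it just throws the pointer value away.)

    #include <stdbool.h>
    #include <stddef.h>

    struct mem_cgroup { int stub; };
    struct scan_control { struct mem_cgroup *target_mem_cgroup; };

    /* What the code effectively did before: pointer implicitly
     * coerced to true/false on return. */
    static bool cgroup_reclaim_as_bool(struct scan_control *sc)
    {
            return sc->target_mem_cgroup;
    }

    /* What the patch changes it to: the pointer itself is returned,
     * so callers can use it both as a predicate and as a value. */
    static struct mem_cgroup *cgroup_reclaim_as_ptr(struct scan_control *sc)
    {
            return sc->target_mem_cgroup;
    }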

Could you make your intent in the patch clearer? If it's that you found the code confusing, you can just write something like:

    mm/vmscan: return target_mem_cgroup from cgroup_reclaim

    Previously, the code split apart the action of checking whether we are in cgroup reclaim from that of getting the target memory cgroup for that reclaim. This split is confusing and seems unnecessary, since cgroup_reclaim() itself only checks whether sc->target_mem_cgroup is NULL, so merge the two use cases into one by returning the target_mem_cgroup itself from cgroup_reclaim().

    Signed-off-by: chenqiwu <chenqiwu@xxxxxxxxxx>

After improving the patch title and summary, you can add:

    Acked-by: Chris Down <chris@xxxxxxxxxxxxxx>
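
To make the effect concrete for anyone skimming the thread, here is a trimmed, hypothetical userspace demo (not kernel code; the structs are cut down to the one relevant field) of the two call patterns the helper now covers, predicate and accessor:

    #include <stdio.h>
    #include <stddef.h>

    struct mem_cgroup { const char *name; };
    struct scan_control { struct mem_cgroup *target_mem_cgroup; };

    static struct mem_cgroup *cgroup_reclaim(struct scan_control *sc)
    {
            return sc->target_mem_cgroup;
    }

    int main(void)
    {
            struct mem_cgroup memcg = { .name = "demo" };
            struct scan_control global = { .target_mem_cgroup = NULL };
            struct scan_control targeted = { .target_mem_cgroup = &memcg };

            /* Predicate use: NULL means global (non-cgroup) reclaim. */
            if (!cgroup_reclaim(&global))
                    printf("global reclaim\n");

            /* Accessor use: the same call hands back the target memcg. */
            struct mem_cgroup *t = cgroup_reclaim(&targeted);
            if (t)
                    printf("cgroup reclaim targeting %s\n", t->name);
            return 0;
    }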

---
mm/vmscan.c | 39 +++++++++++++++++++++------------------
1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index dca623d..c795fc3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -238,7 +238,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
	up_write(&shrinker_rwsem);
}

-static bool cgroup_reclaim(struct scan_control *sc)
+static struct mem_cgroup *cgroup_reclaim(struct scan_control *sc)
{
	return sc->target_mem_cgroup;
}
@@ -276,9 +276,9 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
{
}

-static bool cgroup_reclaim(struct scan_control *sc)
+static struct mem_cgroup *cgroup_reclaim(struct scan_control *sc)
{
-	return false;
+	return NULL;
}

static bool writeback_throttling_sane(struct scan_control *sc)
@@ -984,7 +984,7 @@ static enum page_references page_check_references(struct page *page,
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

-	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+	referenced_ptes = page_referenced(page, 1, cgroup_reclaim(sc),
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

@@ -1422,7 +1422,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
			count_vm_event(PGLAZYFREED);
			count_memcg_page_event(page, PGLAZYFREED);
		} else if (!mapping || !__remove_mapping(mapping, page, true,
-							 sc->target_mem_cgroup))
+							 cgroup_reclaim(sc)))
			goto keep_locked;

		unlock_page(page);
@@ -1907,6 +1907,7 @@ static int current_may_throttle(void)
	enum vm_event_item item;
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+	struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
	bool stalled = false;

	while (unlikely(too_many_isolated(pgdat, file, sc))) {
@@ -1933,7 +1934,7 @@ static int current_may_throttle(void)
	reclaim_stat->recent_scanned[file] += nr_taken;

	item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
-	if (!cgroup_reclaim(sc))
+	if (!target_memcg)
		__count_vm_events(item, nr_scanned);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_scanned);
	spin_unlock_irq(&pgdat->lru_lock);
@@ -1947,7 +1948,7 @@ static int current_may_throttle(void)
	spin_lock_irq(&pgdat->lru_lock);

	item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
-	if (!cgroup_reclaim(sc))
+	if (!target_memcg)
		__count_vm_events(item, nr_reclaimed);
	__count_memcg_events(lruvec_memcg(lruvec), item, nr_reclaimed);
	reclaim_stat->recent_rotated[0] += stat.nr_activate[0];
@@ -2041,7 +2042,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
			}
		}

-		if (page_referenced(page, 0, sc->target_mem_cgroup,
+		if (page_referenced(page, 0, cgroup_reclaim(sc),
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
@@ -2625,7 +2626,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,

static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
{
-	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
+	struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
@@ -2686,10 +2687,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	struct reclaim_state *reclaim_state = current->reclaim_state;
	unsigned long nr_reclaimed, nr_scanned;
	struct lruvec *target_lruvec;
+	struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
	bool reclaimable = false;
	unsigned long file;

-	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+	target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);

again:
	memset(&sc->nr, 0, sizeof(sc->nr));
@@ -2744,7 +2746,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	 * thrashing file LRU becomes infinitely more attractive than
	 * anon pages.  Try to detect this based on file LRU size.
	 */
-	if (!cgroup_reclaim(sc)) {
+	if (!target_memcg) {
		unsigned long total_high_wmark = 0;
		unsigned long free, anon;
		int z;
@@ -2782,7 +2784,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	}

	/* Record the subtree's reclaim efficiency */
-	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+	vmpressure(sc->gfp_mask, target_memcg, true,
		   sc->nr_scanned - nr_scanned,
		   sc->nr_reclaimed - nr_reclaimed);

@@ -2833,7 +2835,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
	 * stalling in wait_iff_congested().
	 */
	if ((current_is_kswapd() ||
-	     (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&
+	     (target_memcg && writeback_throttling_sane(sc))) &&
	    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
		set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);

@@ -3020,14 +3022,15 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
	pg_data_t *last_pgdat;
	struct zoneref *z;
	struct zone *zone;
+	struct mem_cgroup *target_memcg = cgroup_reclaim(sc);
retry:
	delayacct_freepages_start();

-	if (!cgroup_reclaim(sc))
+	if (!target_memcg)
		__count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1);

	do {
-		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
+		vmpressure_prio(sc->gfp_mask, target_memcg,
				sc->priority);
		sc->nr_scanned = 0;
		shrink_zones(zonelist, sc);
@@ -3053,12 +3056,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
			continue;
		last_pgdat = zone->zone_pgdat;

-		snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
+		snapshot_refaults(target_memcg, zone->zone_pgdat);

-		if (cgroup_reclaim(sc)) {
+		if (target_memcg) {
			struct lruvec *lruvec;

-			lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup,
+			lruvec = mem_cgroup_lruvec(target_memcg,
						   zone->zone_pgdat);
			clear_bit(LRUVEC_CONGESTED, &lruvec->flags);
		}
--
1.9.1