Patch "mm: move zone->pages_scanned into a vmstat counter" has been added to the 3.14-stable tree

This is a note to let you know that I've just added the patch titled

    mm: move zone->pages_scanned into a vmstat counter

to the 3.14-stable tree, which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     mm-move-zone-pages_scanned-into-a-vmstat-counter.patch
and it can be found in the queue-3.14 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From 0d5d823ab4e608ec7b52ac4410de4cb74bbe0edd Mon Sep 17 00:00:00 2001
From: Mel Gorman <mgorman@xxxxxxx>
Date: Wed, 6 Aug 2014 16:07:16 -0700
Subject: mm: move zone->pages_scanned into a vmstat counter

From: Mel Gorman <mgorman@xxxxxxx>

commit 0d5d823ab4e608ec7b52ac4410de4cb74bbe0edd upstream.

zone->pages_scanned is a write-intensive cache line during page reclaim
and it's also updated during page free.  Move the counter into vmstat to
take advantage of the per-cpu updates and do not update it in the free
paths unless necessary.
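
For illustration only, here is a minimal userspace sketch of the
batching scheme that per-cpu vmstat counters rely on (the kernel
equivalents are __mod_zone_page_state() and the per-cpu vm_stat_diff
deltas; THRESHOLD, mod_counter and read_counter below are made-up
names, not kernel APIs): each CPU accumulates updates in a cheap
thread-local delta and writes the shared, contended counter only when
the delta overflows a threshold.

#include <stdatomic.h>

#define THRESHOLD 32			/* the kernel sizes this per zone/cpu */

static atomic_long shared_counter;	/* stands in for the zone-wide counter */
static _Thread_local long local_delta;	/* stands in for the per-cpu delta */

static void mod_counter(long delta)
{
	local_delta += delta;
	/* Touch the shared (contended) cache line only when the local
	 * batch overflows the threshold. */
	if (local_delta > THRESHOLD || local_delta < -THRESHOLD) {
		atomic_fetch_add(&shared_counter, local_delta);
		local_delta = 0;
	}
}

static long read_counter(void)
{
	/* Approximate: up to THRESHOLD per thread may still be local.
	 * A heuristic such as zone_reclaimable() tolerates this slack. */
	return atomic_load(&shared_counter);
}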

On a small UMA machine running tiobench the difference is marginal.  On
a 4-node machine the overhead is more noticeable.  Note that automatic
NUMA balancing was disabled for this test, as the system CPU overhead
is otherwise unpredictable.

            3.16.0-rc3    3.16.0-rc3    3.16.0-rc3
               vanilla  rearrange-v5     vmstat-v5
User            746.94        759.78        774.56
System        65336.22      58350.98      32847.27
Elapsed       27553.52      27282.02      27415.04

Note that the overhead reduction will vary depending on where exactly
pages are allocated and freed.
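
The "unless necessary" part is the read-before-write pattern visible in
the free paths below: the counter is written only when it is actually
nonzero, so a free that happens with no reclaim activity since the last
reset never dirties the shared line.  A standalone sketch of that
pattern (illustrative names, plain userspace C, not kernel code):

#include <stdatomic.h>

static atomic_long scanned;		/* stands in for NR_PAGES_SCANNED */

/* Called from the hot free path: read first, write only when there is
 * something to clear, keeping the common case read-only. */
static void reset_scanned(void)
{
	long nr = atomic_load(&scanned);

	if (nr)
		atomic_fetch_sub(&scanned, nr);
}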

Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 include/linux/mmzone.h |    2 +-
 mm/page_alloc.c        |   12 +++++++++---
 mm/vmscan.c            |    7 ++++---
 mm/vmstat.c            |    3 ++-
 4 files changed, 16 insertions(+), 8 deletions(-)

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -143,6 +143,7 @@ enum zone_stat_item {
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
 	NR_DIRTIED,		/* page dirtyings since bootup */
 	NR_WRITTEN,		/* page writings since bootup */
+	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
@@ -478,7 +479,6 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	unsigned long		pages_scanned;	   /* since last reclaim */
 	struct lruvec		lruvec;
 
 	/*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -678,9 +678,12 @@ static void free_pcppages_bulk(struct zo
 	int migratetype = 0;
 	int batch_free = 0;
 	int to_free = count;
+	unsigned long nr_scanned;
 
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (to_free) {
 		struct page *page;
@@ -729,8 +732,11 @@ static void free_one_page(struct zone *z
 				unsigned int order,
 				int migratetype)
 {
+	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	__free_one_page(page, pfn, zone, order, migratetype);
 	if (unlikely(!is_migrate_isolate(migratetype)))
@@ -3251,7 +3257,7 @@ void show_free_areas(unsigned int filter
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
-			zone->pages_scanned,
+			K(zone_page_state(zone, NR_PAGES_SCANNED)),
 			(!zone_reclaimable(zone) ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -163,7 +163,8 @@ static unsigned long zone_reclaimable_pa
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+	return zone_page_state(zone, NR_PAGES_SCANNED) <
+		zone_reclaimable_pages(zone) * 6;
 }
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -1470,7 +1471,7 @@ shrink_inactive_list(unsigned long nr_to
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 
 	if (global_reclaim(sc)) {
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
@@ -1659,7 +1660,7 @@ static void shrink_active_list(unsigned
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
 				     &nr_scanned, sc, isolate_mode, lru);
 	if (global_reclaim(sc))
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -761,6 +761,7 @@ const char * const vmstat_text[] = {
 	"nr_shmem",
 	"nr_dirtied",
 	"nr_written",
+	"nr_pages_scanned",
 
 #ifdef CONFIG_NUMA
 	"numa_hit",
@@ -1055,7 +1056,7 @@ static void zoneinfo_show_print(struct s
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   zone->pages_scanned,
+		   zone_page_state(zone, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
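
For reference, once this patch is applied the counter is user-visible:
the vmstat_text hunk above exports it as "nr_pages_scanned" in
/proc/vmstat, and the zoneinfo hunk feeds the per-zone "scanned" field
of /proc/zoneinfo.  A trivial userspace reader, shown only as an
illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "nr_pages_scanned ", 17) == 0)
			fputs(line, stdout);	/* e.g. "nr_pages_scanned 0" */
	fclose(f);
	return 0;
}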


Patches currently in stable-queue which might be from mgorman@xxxxxxx are

queue-3.14/mm-move-zone-pages_scanned-into-a-vmstat-counter.patch
queue-3.14/mm-vmscan-only-update-per-cpu-thresholds-for-online-cpu.patch
queue-3.14/mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch
queue-3.14/mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch
queue-3.14/mm-non-atomically-mark-page-accessed-during-page-cache-allocation-where-possible.patch
queue-3.14/mm-page_alloc-convert-hot-cold-parameter-and-immediate-callers-to-bool.patch
queue-3.14/memcg-vmscan-fix-forced-scan-of-anonymous-pages.patch
queue-3.14/mm-thp-only-collapse-hugepages-to-nodes-with-affinity-for-zone_reclaim_mode.patch
queue-3.14/mm-page_alloc-only-check-the-zone-id-check-if-pages-are-buddies.patch
queue-3.14/mm-page_alloc-only-check-the-alloc-flags-and-gfp_mask-for-dirty-once.patch
queue-3.14/mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch
queue-3.14/mm-swap.c-clean-up-lru_cache_add-functions.patch
queue-3.14/mm-page_alloc-take-the-alloc_no_watermark-check-out-of-the-fast-path.patch
queue-3.14/fs-buffer-do-not-use-unnecessary-atomic-operations-when-discarding-buffers.patch
queue-3.14/mm-do-not-use-atomic-operations-when-releasing-pages.patch
queue-3.14/mm-page_alloc-reduce-number-of-times-page_to_pfn-is-called.patch
queue-3.14/mm-page_alloc-use-unsigned-int-for-order-in-more-places.patch
queue-3.14/mm-avoid-unnecessary-atomic-operations-during-end_page_writeback.patch
queue-3.14/vmalloc-use-rcu-list-iterator-to-reduce-vmap_area_lock-contention.patch
queue-3.14/mm-page_alloc-abort-fair-zone-allocation-policy-when-remotes-nodes-are-encountered.patch
queue-3.14/mm-page_alloc-reduce-cost-of-the-fair-zone-allocation-policy.patch
queue-3.14/mm-shmem-avoid-atomic-operation-during-shmem_getpage_gfp.patch
queue-3.14/mm-pagemap-avoid-unnecessary-overhead-when-tracepoints-are-deactivated.patch
queue-3.14/shmem-fix-init_page_accessed-use-to-stop-pagelru-bug.patch
queue-3.14/mm-memory.c-use-entry-access_once-pte-in-handle_pte_fault.patch
queue-3.14/mm-make-copy_pte_range-static-again.patch
queue-3.14/mm-rearrange-zone-fields-into-read-only-page-alloc-statistics-and-page-reclaim-lines.patch
queue-3.14/mm-do-not-use-unnecessary-atomic-operations-when-adding-pages-to-the-lru.patch
queue-3.14/include-linux-jump_label.h-expose-the-reference-count.patch
queue-3.14/mm-page_alloc-calculate-classzone_idx-once-from-the.patch
queue-3.14/mm-page_alloc-lookup-pageblock-migratetype-with-irqs-enabled-during-free.patch



