- zoned-vm-stats-add-nr_anon.patch removed from -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The patch titled

     zoned VM stats: Add NR_ANON

has been removed from the -mm tree.  Its filename is

     zoned-vm-stats-add-nr_anon.patch

This patch was dropped because it had testing failures

------------------------------------------------------
Subject: zoned VM stats: Add NR_ANON
From: Christoph Lameter <clameter@xxxxxxx>


The current NR_MAPPED is used by zone reclaim and the dirty load
calculation as the number of mapped pagecache pages.  However, that is not
true.  NR_MAPPED includes the mapped anonymous pages.  This patch clearly
separates those and therefore allows an accurate tracking of the anonymous
pages per zone and the number of mapped pages in the pagecache of each
zone.

We can then more accurately determine when zone reclaim is to be run.

Also it may now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?

Note that this will change the meaning of the number of mapped pages
reported in /proc/vmstat, /proc/meminfo, and in the per-node statistics. 
This may affect user space tools that monitor these counters!

However, NR_MAPPED then works like NR_DIRTY.  It is only valid for
pagecache pages.

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/base/node.c    |    2 ++
 fs/proc/proc_misc.c    |    2 ++
 include/linux/mmzone.h |    3 ++-
 mm/page-writeback.c    |    4 +++-
 mm/page_alloc.c        |    6 ++++--
 mm/rmap.c              |    4 ++--
 mm/vmscan.c            |   18 +++++++++++-------
 7 files changed, 26 insertions(+), 13 deletions(-)

diff -puN drivers/base/node.c~zoned-vm-stats-add-nr_anon drivers/base/node.c
--- devel/drivers/base/node.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/drivers/base/node.c	2006-06-09 15:17:45.000000000 -0700
@@ -65,6 +65,7 @@ static ssize_t node_read_meminfo(struct 
 		       "Node %d Dirty:        %8lu kB\n"
 		       "Node %d Writeback:    %8lu kB\n"
 		       "Node %d Unstable:     %8lu kB\n"
+		       "Node %d Anonymous:    %8lu kB\n"
 		       "Node %d Mapped:       %8lu kB\n"
 		       "Node %d Pagecache:    %8lu kB\n"
 		       "Node %d Slab:         %8lu kB\n"
@@ -81,6 +82,7 @@ static ssize_t node_read_meminfo(struct 
 		       nid, K(nr[NR_DIRTY]),
 		       nid, K(nr[NR_WRITEBACK]),
 		       nid, K(nr[NR_UNSTABLE]),
+		       nid, K(nr[NR_ANON]),
 		       nid, K(nr[NR_MAPPED]),
 		       nid, K(nr[NR_PAGECACHE]),
 		       nid, K(nr[NR_SLAB]),
diff -puN fs/proc/proc_misc.c~zoned-vm-stats-add-nr_anon fs/proc/proc_misc.c
--- devel/fs/proc/proc_misc.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/fs/proc/proc_misc.c	2006-06-09 15:17:45.000000000 -0700
@@ -165,6 +165,7 @@ static int meminfo_read_proc(char *page,
 		"SwapFree:     %8lu kB\n"
 		"Dirty:        %8lu kB\n"
 		"Writeback:    %8lu kB\n"
+		"Anonymous:    %8lu kB\n"
 		"Mapped:       %8lu kB\n"
 		"Slab:         %8lu kB\n"
 		"CommitLimit:  %8lu kB\n"
@@ -188,6 +189,7 @@ static int meminfo_read_proc(char *page,
 		K(i.freeswap),
 		K(global_page_state(NR_DIRTY)),
 		K(global_page_state(NR_WRITEBACK)),
+		K(global_page_state(NR_ANON)),
 		K(global_page_state(NR_MAPPED)),
 		K(global_page_state(NR_SLAB)),
 		K(allowed),
diff -puN include/linux/mmzone.h~zoned-vm-stats-add-nr_anon include/linux/mmzone.h
--- devel/include/linux/mmzone.h~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/include/linux/mmzone.h	2006-06-09 15:17:45.000000000 -0700
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-	NR_MAPPED,	/* mapped into pagetables.
+	NR_ANON,	/* Mapped anonymous pages */
+	NR_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_PAGECACHE,	/* file backed pages */
 	NR_SLAB,	/* used by slab allocator */
diff -puN mm/page_alloc.c~zoned-vm-stats-add-nr_anon mm/page_alloc.c
--- devel/mm/page_alloc.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/mm/page_alloc.c	2006-06-09 15:17:45.000000000 -0700
@@ -614,8 +614,9 @@ static int rmqueue_bulk(struct zone *zon
 }
 
 char *vm_stat_item_descr[NR_STAT_ITEMS] = {
-	"mapped", "pagecache", "slab", "pagetable", "dirty", "writeback",
-	"unstable", "bounce"
+	"anon", "mapped", "pagecache", "slab",
+	"pagetable", "dirty", "writeback", "unstable",
+	"bounce"
 };
 
 /*
@@ -2689,6 +2690,7 @@ struct seq_operations zoneinfo_op = {
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_anon",
 	"nr_mapped",
 	"nr_pagecache",
 	"nr_slab",
diff -puN mm/page-writeback.c~zoned-vm-stats-add-nr_anon mm/page-writeback.c
--- devel/mm/page-writeback.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/mm/page-writeback.c	2006-06-09 15:17:45.000000000 -0700
@@ -137,7 +137,9 @@ get_dirty_limits(long *pbackground, long
 #endif
 
 
-	unmapped_ratio = 100 - (global_page_state(NR_MAPPED) * 100) / total_pages;
+	unmapped_ratio = 100 - ((global_page_state(NR_MAPPED) +
+				 global_page_state(NR_ANON)) * 100) /
+					total_pages;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
diff -puN mm/rmap.c~zoned-vm-stats-add-nr_anon mm/rmap.c
--- devel/mm/rmap.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/mm/rmap.c	2006-06-09 15:17:45.000000000 -0700
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct 
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_zone_page_state(page, NR_MAPPED);
+	__inc_zone_page_state(page, NR_ANON);
 }
 
 /**
@@ -531,7 +531,7 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_zone_page_state(page, NR_MAPPED);
+		__dec_zone_page_state(page, PageAnon(page) ? NR_ANON : NR_MAPPED);
 	}
 }
 
diff -puN mm/vmscan.c~zoned-vm-stats-add-nr_anon mm/vmscan.c
--- devel/mm/vmscan.c~zoned-vm-stats-add-nr_anon	2006-06-09 15:17:45.000000000 -0700
+++ devel-akpm/mm/vmscan.c	2006-06-09 15:17:45.000000000 -0700
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned 
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = global_page_state(NR_MAPPED) / vm_total_pages;
+		mapped_ratio = (global_page_state(NR_MAPPED) +
+				global_page_state(NR_ANON)) / vm_total_pages;
 
 		/*
 		 * Now decide how much we really want to unmap some pages.  The
@@ -1594,13 +1595,16 @@ int zone_reclaim(struct zone *zone, gfp_
 
 	/*
 	 * Do not reclaim if there are not enough reclaimable pages in this
-	 * zone. We decide this based on the number of mapped pages
-	 * in relation to the number of page cache pages in this zone.
-	 * If there are more pagecache pages than mapped pages then we can
-	 * be certain that pages can be reclaimed.
 	 * zone that would satisfy this allocation.
+	 *
+	 * All unmapped pagecache pages are reclaimable.
+	 *
+	 * Both counters may be temporarily off a bit so we use
+	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
+	 * leave a few frequently used unmapped pagecache pages around.
 	 */
-	if (zone_page_state(zone, NR_PAGECACHE) <
-		zone_page_state(zone, NR_MAPPED))
+	if (zone_page_state(zone, NR_PAGECACHE) -
+		zone_page_state(zone, NR_MAPPED) < SWAP_CLUSTER_MAX)
 			return 0;
 
 	/*
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-simplify-migrate_pages-tweaks.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-detailed-status-for-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-fixes.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Kernel Newbies FAQ]     [Kernel Archive]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Photo]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux