+ zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch added to -mm tree

The patch titled

     zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED

has been added to the -mm tree.  Its filename is

     zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this.

------------------------------------------------------
Subject: zoned vm counters: split NR_ANON_PAGES off from NR_FILE_MAPPED
From: Christoph Lameter <clameter@xxxxxxx>


The current NR_FILE_MAPPED is used by zone reclaim and by the dirty load
calculation as the number of mapped pagecache pages.  However, that is not
accurate: NR_FILE_MAPPED also includes mapped anonymous pages.  This patch
separates the two and therefore allows accurate tracking of anonymous
pages per zone.

It then becomes possible to determine the number of unmapped pages per zone,
and we can avoid scanning for unmapped pages when there are none.
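
For illustration, the number of unmapped pagecache pages in a zone could then
be derived along these lines (a minimal sketch, assuming the zone_page_state()
accessor from the ZVC patches earlier in this series; not part of this patch):

	/*
	 * Hypothetical helper: pagecache pages in this zone that are not
	 * mapped into any pagetable.  The ZVC counters are folded from
	 * per-cpu deltas, so guard against a transient underflow.
	 */
	static unsigned long zone_unmapped_file_pages(struct zone *zone)
	{
		unsigned long file_pages = zone_page_state(zone, NR_FILE_PAGES);
		unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);

		return file_pages > file_mapped ? file_pages - file_mapped : 0;
	}

Zone reclaim could then skip a zone entirely when this returns zero.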

It may also now be possible to determine the mapped/unmapped ratio in
get_dirty_limit.  Isn't the number of anonymous pages irrelevant in that
calculation?
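
Purely as an illustration of that possible follow-up (not part of this patch,
and using vm_total_pages as in the mm/vmscan.c hunk below), the ratio could be
based on the pagecache counters alone:

	/*
	 * Hypothetical: percentage of memory that is mapped pagecache,
	 * ignoring anonymous pages.
	 */
	static unsigned int file_mapped_ratio(void)
	{
		return global_page_state(NR_FILE_MAPPED) * 100 / vm_total_pages;
	}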

Note that this changes the meaning of the number of mapped pages reported in
/proc/vmstat, /proc/meminfo and in the per-node statistics, which may affect
user space tools that monitor these counters!  NR_FILE_MAPPED now works like
NR_FILE_DIRTY: it is only valid for pagecache pages.
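
The old combined value is simply the sum of the two counters, which is exactly
what get_writeback_state() does in the hunk below; user space tools that want
the previous behaviour can likewise add the new AnonPages field to Mapped.  A
trivial illustration (not from this patch):

	/* old-style "mapped" count == mapped pagecache + mapped anonymous */
	unsigned long nr_mapped = global_page_state(NR_FILE_MAPPED) +
					global_page_state(NR_ANON_PAGES);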

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/base/node.c    |    2 ++
 fs/proc/proc_misc.c    |    2 ++
 include/linux/mmzone.h |    3 ++-
 mm/page-writeback.c    |    3 ++-
 mm/rmap.c              |    5 +++--
 mm/vmscan.c            |    3 ++-
 mm/vmstat.c            |    1 +
 7 files changed, 14 insertions(+), 5 deletions(-)

diff -puN drivers/base/node.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped drivers/base/node.c
--- a/drivers/base/node.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/drivers/base/node.c
@@ -71,6 +71,7 @@ static ssize_t node_read_meminfo(struct 
 		       "Node %d Writeback:    %8lu kB\n"
 		       "Node %d FilePages:    %8lu kB\n"
 		       "Node %d Mapped:       %8lu kB\n"
+		       "Node %d AnonPages:    %8lu kB\n"
 		       "Node %d Slab:         %8lu kB\n",
 		       nid, K(i.totalram),
 		       nid, K(i.freeram),
@@ -85,6 +86,7 @@ static ssize_t node_read_meminfo(struct 
 		       nid, K(ps.nr_writeback),
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
+		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
 		       nid, K(ps.nr_slab));
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
diff -puN fs/proc/proc_misc.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped fs/proc/proc_misc.c
--- a/fs/proc/proc_misc.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/fs/proc/proc_misc.c
@@ -168,6 +168,7 @@ static int meminfo_read_proc(char *page,
 		"SwapFree:     %8lu kB\n"
 		"Dirty:        %8lu kB\n"
 		"Writeback:    %8lu kB\n"
+		"AnonPages:    %8lu kB\n"
 		"Mapped:       %8lu kB\n"
 		"Slab:         %8lu kB\n"
 		"CommitLimit:  %8lu kB\n"
@@ -191,6 +192,7 @@ static int meminfo_read_proc(char *page,
 		K(i.freeswap),
 		K(ps.nr_dirty),
 		K(ps.nr_writeback),
+		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
 		K(ps.nr_slab),
 		K(allowed),
diff -puN include/linux/mmzone.h~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped include/linux/mmzone.h
--- a/include/linux/mmzone.h~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/include/linux/mmzone.h
@@ -47,7 +47,8 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
-	NR_FILE_MAPPED,	/* mapped into pagetables.
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
diff -puN mm/page-writeback.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped mm/page-writeback.c
--- a/mm/page-writeback.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/mm/page-writeback.c
@@ -112,7 +112,8 @@ static void get_writeback_state(struct w
 {
 	wbs->nr_dirty = read_page_state(nr_dirty);
 	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED);
+	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+				global_page_state(NR_ANON_PAGES);
 	wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
diff -puN mm/rmap.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped mm/rmap.c
--- a/mm/rmap.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/mm/rmap.c
@@ -523,7 +523,7 @@ static void __page_set_anon_rmap(struct 
 	 * nr_mapped state can be updated without turning off
 	 * interrupts because it is not modified via interrupt.
 	 */
-	__inc_zone_page_state(page, NR_FILE_MAPPED);
+	__inc_zone_page_state(page, NR_ANON_PAGES);
 }
 
 /**
@@ -599,7 +599,8 @@ void page_remove_rmap(struct page *page)
 		 */
 		if (page_test_and_clear_dirty(page))
 			set_page_dirty(page);
-		__dec_zone_page_state(page, NR_FILE_MAPPED);
+		__dec_zone_page_state(page,
+				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
 	}
 }
 
diff -puN mm/vmscan.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped mm/vmscan.c
--- a/mm/vmscan.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/mm/vmscan.c
@@ -742,7 +742,8 @@ static void shrink_active_list(unsigned 
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (global_page_state(NR_FILE_MAPPED) * 100) /
+		mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+				global_page_state(NR_ANON_PAGES)) * 100) /
 					vm_total_pages;
 
 		/*
diff -puN mm/vmstat.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped mm/vmstat.c
--- a/mm/vmstat.c~zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped
+++ a/mm/vmstat.c
@@ -457,6 +457,7 @@ struct seq_operations fragmentation_op =
 
 static char *vmstat_text[] = {
 	/* Zoned VM counters */
+	"nr_anon_pages",
 	"nr_mapped",
 	"nr_file_pages",
 
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
mm-remove-some-update_mmu_cache-calls.patch
zoned-vm-counters-create-vmstatc-h-from-page_allocc-h.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-tidy.patch
zoned-vm-counters-convert-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure-fix.patch
zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch
zoned-vm-counters-zone_reclaim-remove-proc-sys-vm-zone_reclaim_interval.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetables-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-struct-wbs.patch
selinux-add-task_movememory-hook.patch
selinux-add-security_task_movememory-calls-to-mm-code.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
corrections-to-memory-barrier-doc.patch

