The patch titled

     zoned vm counters: conversion of nr_pagecache to per zone counter

has been added to the -mm tree.  Its filename is

     zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

------------------------------------------------------
Subject: zoned vm counters: conversion of nr_pagecache to per zone counter
From: Christoph Lameter <clameter@xxxxxxx>

Currently a single atomic variable is used to establish the size of the page
cache in the whole machine.  The zoned VM counters use the same method of
implementation as the nr_pagecache code, but also allow the determination of
the pagecache size per zone.

Remove the special implementation for nr_pagecache and make it a zoned counter
named NR_FILE_PAGES.

Updates of the page cache counters are always performed with interrupts off,
so we can use the __ variants of the counter functions here.
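For reference, the differential counting scheme that both the old nr_pagecache
code and the zoned VM counters rely on boils down to something like the
following user-space sketch.  This is illustrative only and not part of the
patch; ZVC_THRESHOLD, local_delta and global_count are made-up names, and the
real implementation keeps one delta per cpu (and, for ZVC, per zone) rather
than a single variable:

	/*
	 * Sketch of threshold-based differential counting: updates go into a
	 * small local delta and are folded into the global count only when
	 * the delta grows past a threshold, keeping updates cheap.
	 */
	#include <stdio.h>

	#define ZVC_THRESHOLD 32	/* hypothetical per-cpu batch size */

	static long global_count;	/* stands in for the global/zone counter */
	static long local_delta;	/* stands in for the per-cpu differential */

	static void zvc_add(int v)
	{
		local_delta += v;
		if (local_delta > ZVC_THRESHOLD || local_delta < -ZVC_THRESHOLD) {
			global_count += local_delta;	/* fold the batch in */
			local_delta = 0;
		}
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 1000; i++)
			zvc_add(1);	/* e.g. pages added to the page cache */
		printf("global=%ld local=%ld\n", global_count, local_delta);
		return 0;
	}

The read side only looks at the global count, so it can momentarily be off by
up to the sum of the outstanding per-cpu deltas; callers such as
__vm_enough_memory() only need an estimate, as the old pagecache_acct()
comment notes.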
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 arch/s390/appldata/appldata_mem.c |    3 +
 arch/sparc/kernel/sys_sunos.c     |    2 -
 arch/sparc64/kernel/sys_sunos32.c |    2 -
 drivers/base/node.c               |    2 +
 fs/proc/proc_misc.c               |    3 +
 include/linux/mmzone.h            |    2 -
 include/linux/pagemap.h           |   45 ----------------------------
 mm/filemap.c                      |    4 +-
 mm/mmap.c                         |    2 -
 mm/nommu.c                        |    2 -
 mm/page_alloc.c                   |    5 ---
 mm/swap_state.c                   |    4 +-
 mm/vmstat.c                       |    7 ----
 13 files changed, 16 insertions(+), 67 deletions(-)

diff -puN arch/s390/appldata/appldata_mem.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter arch/s390/appldata/appldata_mem.c
--- a/arch/s390/appldata/appldata_mem.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/arch/s390/appldata/appldata_mem.c
@@ -130,7 +130,8 @@ static void appldata_get_mem_data(void *
        mem_data->totalhigh = P2K(val.totalhigh);
        mem_data->freehigh  = P2K(val.freehigh);
        mem_data->bufferram = P2K(val.bufferram);
-       mem_data->cached    = P2K(atomic_read(&nr_pagecache) - val.bufferram);
+       mem_data->cached    = P2K(global_page_state(NR_FILE_PAGES)
+                               - val.bufferram);

        si_swapinfo(&val);
        mem_data->totalswap = P2K(val.totalswap);
diff -puN arch/sparc64/kernel/sys_sunos32.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter arch/sparc64/kernel/sys_sunos32.c
--- a/arch/sparc64/kernel/sys_sunos32.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/arch/sparc64/kernel/sys_sunos32.c
@@ -155,7 +155,7 @@ asmlinkage int sunos_brk(u32 baddr)
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
-       freepages = get_page_cache_size();
+       freepages = global_page_state(NR_FILE_PAGES);
        freepages >>= 1;
        freepages += nr_free_pages();
        freepages += nr_swap_pages;
diff -puN arch/sparc/kernel/sys_sunos.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter arch/sparc/kernel/sys_sunos.c
--- a/arch/sparc/kernel/sys_sunos.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/arch/sparc/kernel/sys_sunos.c
@@ -196,7 +196,7 @@ asmlinkage int sunos_brk(unsigned long b
         * simple, it hopefully works in most obvious cases.. Easy to
         * fool it, but this should catch most mistakes.
         */
-       freepages = get_page_cache_size();
+       freepages = global_page_state(NR_FILE_PAGES);
        freepages >>= 1;
        freepages += nr_free_pages();
        freepages += nr_swap_pages;
diff -puN drivers/base/node.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter drivers/base/node.c
--- a/drivers/base/node.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/drivers/base/node.c
@@ -69,6 +69,7 @@ static ssize_t node_read_meminfo(struct
                       "Node %d LowFree:      %8lu kB\n"
                       "Node %d Dirty:        %8lu kB\n"
                       "Node %d Writeback:    %8lu kB\n"
+                      "Node %d FilePages:    %8lu kB\n"
                       "Node %d Mapped:       %8lu kB\n"
                       "Node %d Slab:         %8lu kB\n",
                       nid, K(i.totalram),
@@ -82,6 +83,7 @@ static ssize_t node_read_meminfo(struct
                       nid, K(i.freeram - i.freehigh),
                       nid, K(ps.nr_dirty),
                       nid, K(ps.nr_writeback),
+                      nid, K(node_page_state(nid, NR_FILE_PAGES)),
                       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
                       nid, K(ps.nr_slab));
        n += hugetlb_report_node_meminfo(nid, buf + n);
diff -puN fs/proc/proc_misc.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter fs/proc/proc_misc.c
--- a/fs/proc/proc_misc.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/fs/proc/proc_misc.c
@@ -142,7 +142,8 @@ static int meminfo_read_proc(char *page,
        allowed = ((totalram_pages - hugetlb_total_pages())
                * sysctl_overcommit_ratio / 100) + total_swap_pages;

-       cached = get_page_cache_size() - total_swapcache_pages - i.bufferram;
+       cached = global_page_state(NR_FILE_PAGES) -
+                       total_swapcache_pages - i.bufferram;
        if (cached < 0)
                cached = 0;
diff -puN include/linux/mmzone.h~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter include/linux/mmzone.h
--- a/include/linux/mmzone.h~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/include/linux/mmzone.h
@@ -49,7 +49,7 @@ struct zone_padding {
 enum zone_stat_item {
        NR_FILE_MAPPED, /* mapped into pagetables.
                           only modified from process context */
-
+       NR_FILE_PAGES,
        NR_VM_ZONE_STAT_ITEMS };

 struct per_cpu_pages {
diff -puN include/linux/pagemap.h~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter include/linux/pagemap.h
--- a/include/linux/pagemap.h~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/include/linux/pagemap.h
@@ -106,51 +106,6 @@ int add_to_page_cache_lru(struct page *p
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);

-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD        max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
-/*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
- */
-static inline void pagecache_acct(int count)
-{
-       long *local;
-
-       local = &__get_cpu_var(nr_pagecache_local);
-       *local += count;
-       if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-               atomic_add(*local, &nr_pagecache);
-               *local = 0;
-       }
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-       atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-       int ret = atomic_read(&nr_pagecache);
-       if (unlikely(ret < 0))
-               ret = 0;
-       return ret;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
diff -puN mm/filemap.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/filemap.c
--- a/mm/filemap.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/filemap.c
@@ -120,7 +120,7 @@ void __remove_from_page_cache(struct pag
        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
-       pagecache_acct(-1);
+       __dec_zone_page_state(page, NR_FILE_PAGES);
 }

 void remove_from_page_cache(struct page *page)
@@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page,
                page->mapping = mapping;
                page->index = offset;
                mapping->nrpages++;
-               pagecache_acct(1);
+               __inc_zone_page_state(page, NR_FILE_PAGES);
        }
        write_unlock_irq(&mapping->tree_lock);
        radix_tree_preload_end();
diff -puN mm/mmap.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/mmap.c
--- a/mm/mmap.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/mmap.c
@@ -97,7 +97,7 @@ int __vm_enough_memory(long pages, int c
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;

-               free = get_page_cache_size();
+               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;

                /*
diff -puN mm/nommu.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/nommu.c
--- a/mm/nommu.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/nommu.c
@@ -1122,7 +1122,7 @@ int __vm_enough_memory(long pages, int c
        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;

-               free = get_page_cache_size();
+               free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;

                /*
diff -puN mm/page_alloc.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/page_alloc.c
--- a/mm/page_alloc.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/page_alloc.c
@@ -2133,16 +2133,11 @@ static int page_alloc_cpu_notify(struct
                unsigned long action, void *hcpu)
 {
        int cpu = (unsigned long)hcpu;
-       long *count;
        unsigned long *src, *dest;

        if (action == CPU_DEAD) {
                int i;

-               /* Drain local pagecache count. */
-               count = &per_cpu(nr_pagecache_local, cpu);
-               atomic_add(*count, &nr_pagecache);
-               *count = 0;
                local_irq_disable();
                __drain_pages(cpu);
diff -puN mm/swap_state.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/swap_state.c
--- a/mm/swap_state.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/swap_state.c
@@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct pa
                SetPageSwapCache(page);
                set_page_private(page, entry.val);
                total_swapcache_pages++;
-               pagecache_acct(1);
+               __inc_zone_page_state(page, NR_FILE_PAGES);
        }
        write_unlock_irq(&swapper_space.tree_lock);
        radix_tree_preload_end();
@@ -132,7 +132,7 @@ void __delete_from_swap_cache(struct pag
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
-       pagecache_acct(-1);
+       __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
 }
diff -puN mm/vmstat.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter mm/vmstat.c
--- a/mm/vmstat.c~zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter
+++ a/mm/vmstat.c
@@ -20,12 +20,6 @@
  */
 static DEFINE_PER_CPU(struct page_state, page_states) = {0};

-atomic_t nr_pagecache = ATOMIC_INIT(0);
-EXPORT_SYMBOL(nr_pagecache);
-#ifdef CONFIG_SMP
-DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
-#endif
-
 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
 {
        unsigned cpu;
@@ -464,6 +458,7 @@ struct seq_operations fragmentation_op =
 static char *vmstat_text[] = {
        /* Zoned VM counters */
        "nr_mapped",
+       "nr_file_pages",

        /* Page state */
        "nr_dirty",
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
mm-remove-some-update_mmu_cache-calls.patch
zoned-vm-counters-create-vmstatc-h-from-page_allocc-h.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-tidy.patch
zoned-vm-counters-convert-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure-fix.patch
zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch
zoned-vm-counters-zone_reclaim-remove-proc-sys-vm-zone_reclaim_interval.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetables-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-struct-wbs.patch
selinux-add-task_movememory-hook.patch
selinux-add-security_task_movememory-calls-to-mm-code.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
corrections-to-memory-barrier-doc.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html