The patch titled

     zoned vm counters: conversion of nr_dirty to per zone counter

has been added to the -mm tree.  Its filename is

     zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt
to find out what to do about this

------------------------------------------------------
Subject: zoned vm counters: conversion of nr_dirty to per zone counter
From: Christoph Lameter <clameter@xxxxxxx>

Conversion of nr_dirty to a per zone counter

This makes nr_dirty a per zone counter.  Looping over all processors is
avoided during writeback state determination.

The counter aggregation for nr_dirty had to be undone in the NFS layer
since it summed up the page counts from multiple zones.

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---
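[Editor's illustration, not part of the patch: the small user-space model
below only sketches the idea behind the conversion.  Every name in it
(zone_stat, inc_zone_state, global_state, NR_ZONES) is invented for the
example, and the kernel's own bookkeeping is more elaborate.  The point is
that dirty pages are charged to the zone that owns them, so the global
figure comes from summing a handful of zones instead of walking every
possible CPU the way the old read_page_state(nr_dirty) scheme did.]

/* Toy model of zoned VM counters -- illustration only, not kernel code. */
#include <stdio.h>

enum zone_stat_item { NR_MAPPED, NR_PAGECACHE, NR_SLAB, NR_PAGETABLE,
                      NR_DIRTY, NR_STAT_ITEMS };

#define NR_ZONES 3      /* pretend: DMA, NORMAL, HIGHMEM */

static long zone_stat[NR_ZONES][NR_STAT_ITEMS];

/* Charge one event to the zone the page belongs to. */
static void inc_zone_state(int zone, enum zone_stat_item item)
{
        zone_stat[zone][item]++;
}

/* Global view: a short sum over zones, independent of the CPU count. */
static long global_state(enum zone_stat_item item)
{
        long total = 0;
        int zone;

        for (zone = 0; zone < NR_ZONES; zone++)
                total += zone_stat[zone][item];
        return total;
}

int main(void)
{
        inc_zone_state(1, NR_DIRTY);    /* a page in "NORMAL" was dirtied */
        inc_zone_state(2, NR_DIRTY);    /* a page in "HIGHMEM" was dirtied */
        printf("nr_dirty = %ld\n", global_state(NR_DIRTY));
        return 0;
}

[Compiled with any C compiler this prints "nr_dirty = 2".  The diff below
moves the real callers onto the per-zone API (__inc_zone_page_state() and
friends) in the same spirit.]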
 arch/i386/mm/pgtable.c     |    2 +-
 drivers/base/node.c        |    4 +---
 fs/buffer.c                |    2 +-
 fs/fs-writeback.c          |    2 +-
 fs/nfs/pagelist.c          |    1 +
 fs/nfs/write.c             |    3 +--
 fs/proc/proc_misc.c        |    2 +-
 include/linux/mmzone.h     |    1 +
 include/linux/page-flags.h |    1 -
 mm/page-writeback.c        |   10 +++++-----
 mm/page_alloc.c            |    8 +++++---
 11 files changed, 18 insertions(+), 18 deletions(-)

diff -puN arch/i386/mm/pgtable.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter arch/i386/mm/pgtable.c
--- devel/arch/i386/mm/pgtable.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/arch/i386/mm/pgtable.c 2006-06-09 02:49:43.000000000 -0700
@@ -59,7 +59,7 @@ void show_mem(void)
 	printk(KERN_INFO "%d pages swap cached\n", cached);
 
 	get_page_state(&ps);
-	printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
+	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_DIRTY));
 	printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
 	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_MAPPED));
 	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
diff -puN drivers/base/node.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter drivers/base/node.c
--- devel/drivers/base/node.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/drivers/base/node.c 2006-06-09 02:49:43.000000000 -0700
@@ -54,8 +54,6 @@ static ssize_t node_read_meminfo(struct
 		nr[j] = node_page_state(nid, j);
 
 	/* Check for negative values in these approximate counters */
-	if ((long)ps.nr_dirty < 0)
-		ps.nr_dirty = 0;
 	if ((long)ps.nr_writeback < 0)
 		ps.nr_writeback = 0;
 
@@ -83,7 +81,7 @@ static ssize_t node_read_meminfo(struct
 		       nid, K(i.freehigh),
 		       nid, K(i.totalram - i.totalhigh),
 		       nid, K(i.freeram - i.freehigh),
-		       nid, K(ps.nr_dirty),
+		       nid, K(nr[NR_DIRTY]),
 		       nid, K(ps.nr_writeback),
 		       nid, K(nr[NR_MAPPED]),
 		       nid, K(nr[NR_PAGECACHE]),
diff -puN fs/buffer.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter fs/buffer.c
--- devel/fs/buffer.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/fs/buffer.c 2006-06-09 02:49:43.000000000 -0700
@@ -854,7 +854,7 @@ int __set_page_dirty_buffers(struct page
 	write_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		if (mapping_cap_account_dirty(mapping))
-			inc_page_state(nr_dirty);
+			__inc_zone_page_state(page, NR_DIRTY);
 		radix_tree_tag_set(&mapping->page_tree,
 				page_index(page),
 				PAGECACHE_TAG_DIRTY);
diff -puN fs/fs-writeback.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter fs/fs-writeback.c
--- devel/fs/fs-writeback.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/fs/fs-writeback.c 2006-06-09 02:49:43.000000000 -0700
@@ -464,7 +464,7 @@ void sync_inodes_sb(struct super_block *
 		.range_start	= 0,
 		.range_end	= LLONG_MAX,
 	};
-	unsigned long nr_dirty = read_page_state(nr_dirty);
+	unsigned long nr_dirty = global_page_state(NR_DIRTY);
 	unsigned long nr_unstable = read_page_state(nr_unstable);
 
 	wbc.nr_to_write = nr_dirty + nr_unstable +
diff -puN fs/nfs/pagelist.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter fs/nfs/pagelist.c
--- devel/fs/nfs/pagelist.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/fs/nfs/pagelist.c 2006-06-09 02:49:43.000000000 -0700
@@ -315,6 +315,7 @@ nfs_scan_lock_dirty(struct nfs_inode *nf
 				req->wb_index, NFS_PAGE_TAG_DIRTY);
 			nfs_list_remove_request(req);
 			nfs_list_add_request(req, dst);
+			inc_zone_page_state(req->wb_page, NR_DIRTY);
 			res++;
 		}
 	}
diff -puN fs/nfs/write.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter fs/nfs/write.c
--- devel/fs/nfs/write.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/fs/nfs/write.c 2006-06-09 02:49:43.000000000 -0700
@@ -497,7 +497,7 @@ nfs_mark_request_dirty(struct nfs_page *
 		nfs_list_add_request(req, &nfsi->dirty);
 		nfsi->ndirty++;
 		spin_unlock(&nfsi->req_lock);
-		inc_page_state(nr_dirty);
+		inc_zone_page_state(req->wb_page, NR_DIRTY);
 		mark_inode_dirty(inode);
 	}
 
@@ -598,7 +598,6 @@ nfs_scan_dirty(struct inode *inode, stru
 	if (nfsi->ndirty != 0) {
 		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
 		nfsi->ndirty -= res;
-		sub_page_state(nr_dirty,res);
 		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
 			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
 	}
diff -puN fs/proc/proc_misc.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter fs/proc/proc_misc.c
--- devel/fs/proc/proc_misc.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/fs/proc/proc_misc.c 2006-06-09 02:49:43.000000000 -0700
@@ -188,7 +188,7 @@ static int meminfo_read_proc(char *page,
 		K(i.freeram-i.freehigh),
 		K(i.totalswap),
 		K(i.freeswap),
-		K(ps.nr_dirty),
+		K(global_page_state(NR_DIRTY)),
 		K(ps.nr_writeback),
 		K(global_page_state(NR_MAPPED)),
 		K(global_page_state(NR_SLAB)),
diff -puN include/linux/mmzone.h~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter include/linux/mmzone.h
--- devel/include/linux/mmzone.h~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/include/linux/mmzone.h 2006-06-09 02:49:43.000000000 -0700
@@ -52,6 +52,7 @@ enum zone_stat_item {
 	NR_PAGECACHE,	/* file backed pages */
 	NR_SLAB,	/* used by slab allocator */
 	NR_PAGETABLE,	/* used for pagetables */
+	NR_DIRTY,
 	NR_STAT_ITEMS };
 
 #ifdef CONFIG_SMP
diff -puN include/linux/page-flags.h~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter include/linux/page-flags.h
--- devel/include/linux/page-flags.h~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/include/linux/page-flags.h 2006-06-09 02:49:43.000000000 -0700
@@ -117,7 +117,6 @@
  * commented here.
  */
 struct page_state {
-	unsigned long nr_dirty;		/* Dirty writeable pages */
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 #define GET_PAGE_STATE_LAST nr_unstable
diff -puN mm/page_alloc.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter mm/page_alloc.c
--- devel/mm/page_alloc.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/mm/page_alloc.c 2006-06-09 02:49:43.000000000 -0700
@@ -613,7 +613,9 @@ static int rmqueue_bulk(struct zone *zon
 	return i;
 }
 
-char *vm_stat_item_descr[NR_STAT_ITEMS] = { "mapped","pagecache", "slab", "pagetable" };
+char *vm_stat_item_descr[NR_STAT_ITEMS] = {
+	"mapped", "pagecache", "slab", "pagetable", "dirty"
+};
 
 /*
  * Manage combined zone based / global counters
@@ -1717,7 +1719,7 @@ void show_free_areas(void)
 		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
 		active,
 		inactive,
-		ps.nr_dirty,
+		global_page_state(NR_DIRTY),
 		ps.nr_writeback,
 		ps.nr_unstable,
 		nr_free_pages(),
@@ -2714,9 +2716,9 @@ static char *vmstat_text[] = {
 	"nr_pagecache",
 	"nr_slab",
 	"nr_page_table_pages",
+	"nr_dirty",
 
 	/* Page state */
-	"nr_dirty",
 	"nr_writeback",
 	"nr_unstable",
diff -puN mm/page-writeback.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter mm/page-writeback.c
--- devel/mm/page-writeback.c~zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter 2006-06-09 02:49:43.000000000 -0700
+++ devel-akpm/mm/page-writeback.c 2006-06-09 02:49:43.000000000 -0700
@@ -109,7 +109,7 @@ struct writeback_state
 
 static void get_writeback_state(struct writeback_state *wbs)
 {
-	wbs->nr_dirty = read_page_state(nr_dirty);
+	wbs->nr_dirty = global_page_state(NR_DIRTY);
 	wbs->nr_unstable = read_page_state(nr_unstable);
 	wbs->nr_mapped = global_page_state(NR_MAPPED);
 	wbs->nr_writeback = read_page_state(nr_writeback);
@@ -640,7 +640,7 @@ int __set_page_dirty_nobuffers(struct pa
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
 			if (mapping_cap_account_dirty(mapping))
-				inc_page_state(nr_dirty);
+				__inc_zone_page_state(page, NR_DIRTY);
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
@@ -727,9 +727,9 @@ int test_clear_page_dirty(struct page *p
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				__dec_zone_page_state(page, NR_DIRTY);
+			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -760,7 +760,7 @@ int clear_page_dirty_for_io(struct page
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				dec_zone_page_state(page, NR_DIRTY);
 			return 1;
 		}
 		return 0;
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-simplify-migrate_pages-tweaks.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-detailed-status-for-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-fixes.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
zoned-vm-counters-per-zone-counter-functionality.patch
zoned-vm-counters-include-per-zone-counters-in-proc-vmstat.patch
zoned-vm-counters-conversion-of-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-use-per-zone-counters-to-remove-zone_reclaim_interval.patch
zoned-vm-counters-add-per-zone-counters-to-zone-node-and-global-vm-statistics.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-remove-unused-get_page_stat-functions.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-writeback-structure.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
swap_prefetch-conversion-of-nr_mapped-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_slab-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_dirty-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_writeback-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_unstable-to-per-zone-counter.patch
swap_prefetch-remove-unused-get_page_stat-functions.patch
reiser4-conversion-of-nr_dirty-to-per-zone-counter.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html