- light-weight-counters-counter-conversion.patch removed from -mm tree

The patch titled

     Light weight counters: counter conversion

has been removed from the -mm tree.  Its filename is

     light-weight-counters-counter-conversion.patch

This patch was dropped because it was folded into another patch

------------------------------------------------------
Subject: Light weight counters: counter conversion
From: Christoph Lameter <clameter@xxxxxxx>


Convert inc/mod page_state operations to count_vm_event(s)

Convert the page_state operations to count_vm_event(), count_vm_events()
and count_zone_vm_events().
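
The counter primitives themselves are introduced by the companion
light-weight-counters-framework.patch and are not part of this diff.  As a
rough sketch of the interface the converted call sites assume (the event
names appear in the diff below; vm_event_states, NR_VM_EVENT_ITEMS and the
per-cpu layout are illustrative, not code taken from the framework patch),
the events are cheap per-cpu counters along these lines:

/*
 * Illustrative sketch only: the real definitions come from
 * light-weight-counters-framework.patch.  The event names match the
 * callers below; vm_event_states, NR_VM_EVENT_ITEMS and the per-cpu
 * layout are assumptions made for this sketch.
 */
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGFREE,
		     PGFAULT, PGMAJFAULT, /* ... */ NR_VM_EVENT_ITEMS };

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Bump one event on the local cpu; no shared cacheline is written. */
static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu_var(vm_event_states);
}

/* Account 'delta' events at once, e.g. 1 << order freed pages. */
static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu_var(vm_event_states);
}

/* Readers such as drivers/parisc/led.c fold the per-cpu counters. */
static inline unsigned long get_global_events(enum vm_event_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(vm_event_states, cpu).event[item];
	return sum;
}

count_zone_vm_events() additionally takes the zone so that per-zone events
(PGALLOC, PGREFILL, PGSCAN_*, PGSTEAL) can be attributed to the zone being
operated on; its sketch is omitted here.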

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 block/ll_rw_blk.c    |    4 ++--
 drivers/parisc/led.c |   10 ++++------
 fs/inode.c           |    4 ++--
 fs/ncpfs/mmap.c      |    2 +-
 mm/filemap.c         |    4 ++--
 mm/memory.c          |    4 ++--
 mm/page_alloc.c      |    6 +++---
 mm/page_io.c         |    4 ++--
 mm/shmem.c           |    2 +-
 mm/swap.c            |    4 ++--
 mm/vmscan.c          |   23 +++++++++++------------
 11 files changed, 32 insertions(+), 35 deletions(-)

diff -puN block/ll_rw_blk.c~light-weight-counters-counter-conversion block/ll_rw_blk.c
--- 25/block/ll_rw_blk.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/block/ll_rw_blk.c	Fri Jun  9 15:01:52 2006
@@ -3117,9 +3117,9 @@ void submit_bio(int rw, struct bio *bio)
 	BIO_BUG_ON(!bio->bi_io_vec);
 	bio->bi_rw |= rw;
 	if (rw & WRITE)
-		mod_page_state(pgpgout, count);
+		count_vm_events(PGPGOUT, count);
 	else
-		mod_page_state(pgpgin, count);
+		count_vm_events(PGPGIN, count);
 
 	if (unlikely(block_dump)) {
 		char b[BDEVNAME_SIZE];
diff -puN drivers/parisc/led.c~light-weight-counters-counter-conversion drivers/parisc/led.c
--- 25/drivers/parisc/led.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/drivers/parisc/led.c	Fri Jun  9 15:01:52 2006
@@ -411,14 +411,12 @@ static __inline__ int led_get_net_activi
 static __inline__ int led_get_diskio_activity(void)
 {	
 	static unsigned long last_pgpgin, last_pgpgout;
-	struct page_state pgstat;
 	int changed;
 
-	get_full_page_state(&pgstat); /* get no of sectors in & out */
-
 	/* Just use a very simple calculation here. Do not care about overflow,
 	   since we only want to know if there was activity or not. */
-	changed = (pgstat.pgpgin != last_pgpgin) || (pgstat.pgpgout != last_pgpgout);
+	changed = (get_global_events(PGPGIN) != last_pgpgin) ||
+		  (get_global_events(PGPGOUT) != last_pgpgout);
-	last_pgpgin  = pgstat.pgpgin;
-	last_pgpgout = pgstat.pgpgout;
+	last_pgpgin  = get_global_events(PGPGIN);
+	last_pgpgout = get_global_events(PGPGOUT);
 
diff -puN fs/inode.c~light-weight-counters-counter-conversion fs/inode.c
--- 25/fs/inode.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/fs/inode.c	Fri Jun  9 15:01:52 2006
@@ -458,9 +458,9 @@ static void prune_icache(int nr_to_scan)
 	mutex_unlock(&iprune_mutex);
 
 	if (current_is_kswapd())
-		mod_page_state(kswapd_inodesteal, reap);
+		count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
-		mod_page_state(pginodesteal, reap);
+		count_vm_events(PGINODESTEAL, reap);
 }
 
 /*
diff -puN fs/ncpfs/mmap.c~light-weight-counters-counter-conversion fs/ncpfs/mmap.c
--- 25/fs/ncpfs/mmap.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/fs/ncpfs/mmap.c	Fri Jun  9 15:01:52 2006
@@ -93,7 +93,7 @@ static struct page* ncp_file_mmap_nopage
 	 */
 	if (type)
 		*type = VM_FAULT_MAJOR;
-	inc_page_state(pgmajfault);
+	count_vm_event(PGMAJFAULT);
 	return page;
 }
 
diff -puN mm/filemap.c~light-weight-counters-counter-conversion mm/filemap.c
--- 25/mm/filemap.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/filemap.c	Fri Jun  9 15:01:52 2006
@@ -1327,7 +1327,7 @@ retry_find:
 		 */
 		if (!did_readaround) {
 			majmin = VM_FAULT_MAJOR;
-			inc_page_state(pgmajfault);
+			count_vm_event(PGMAJFAULT);
 		}
 		did_readaround = 1;
 		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
@@ -1398,7 +1398,7 @@ no_cached_page:
 page_not_uptodate:
 	if (!did_readaround) {
 		majmin = VM_FAULT_MAJOR;
-		inc_page_state(pgmajfault);
+		count_vm_event(PGMAJFAULT);
 	}
 	lock_page(page);
 
diff -puN mm/memory.c~light-weight-counters-counter-conversion mm/memory.c
--- 25/mm/memory.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/memory.c	Fri Jun  9 15:01:52 2006
@@ -1950,7 +1950,7 @@ static int do_swap_page(struct mm_struct
 
 		/* Had to read the page from swap area: Major fault */
 		ret = VM_FAULT_MAJOR;
-		inc_page_state(pgmajfault);
+		count_vm_event(PGMAJFAULT);
 		grab_swap_token();
 	}
 
@@ -2323,7 +2323,7 @@ int __handle_mm_fault(struct mm_struct *
 
 	__set_current_state(TASK_RUNNING);
 
-	inc_page_state(pgfault);
+	count_vm_event(PGFAULT);
 
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, write_access);
diff -puN mm/page_alloc.c~light-weight-counters-counter-conversion mm/page_alloc.c
--- 25/mm/page_alloc.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/page_alloc.c	Fri Jun  9 15:01:52 2006
@@ -456,7 +456,7 @@ static void __free_pages_ok(struct page 
 
 	kernel_map_pages(page, 1 << order, 0);
 	local_irq_save(flags);
-	__mod_page_state(pgfree, 1 << order);
+	count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order);
 	local_irq_restore(flags);
 }
@@ -1027,7 +1027,7 @@ static void fastcall free_hot_cold_page(
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
 	local_irq_save(flags);
-	__inc_page_state(pgfree);
+	count_vm_event(PGFREE);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
@@ -1103,7 +1103,7 @@ again:
 			goto failed;
 	}
 
-	__mod_page_state_zone(zone, pgalloc, 1 << order);
+	count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(zonelist, zone, cpu);
 	local_irq_restore(flags);
 	put_cpu();
diff -puN mm/page_io.c~light-weight-counters-counter-conversion mm/page_io.c
--- 25/mm/page_io.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/page_io.c	Fri Jun  9 15:01:52 2006
@@ -101,7 +101,7 @@ int swap_writepage(struct page *page, st
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		rw |= (1 << BIO_RW_SYNC);
-	inc_page_state(pswpout);
+	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
 	submit_bio(rw, bio);
@@ -123,7 +123,7 @@ int swap_readpage(struct file *file, str
 		ret = -ENOMEM;
 		goto out;
 	}
-	inc_page_state(pswpin);
+	count_vm_event(PSWPIN);
 	submit_bio(READ, bio);
 out:
 	return ret;
diff -puN mm/shmem.c~light-weight-counters-counter-conversion mm/shmem.c
--- 25/mm/shmem.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/shmem.c	Fri Jun  9 15:01:52 2006
@@ -1049,7 +1049,7 @@ repeat:
 			spin_unlock(&info->lock);
 			/* here we actually do the io */
 			if (type && *type == VM_FAULT_MINOR) {
-				inc_page_state(pgmajfault);
+				count_vm_event(PGMAJFAULT);
 				*type = VM_FAULT_MAJOR;
 			}
 			swappage = shmem_swapin(info, swap, idx);
diff -puN mm/swap.c~light-weight-counters-counter-conversion mm/swap.c
--- 25/mm/swap.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/swap.c	Fri Jun  9 15:03:31 2006
@@ -88,7 +88,7 @@ int rotate_reclaimable_page(struct page 
 	if (PageLRU(page) && !PageActive(page)) {
 		list_del(&page->lru);
 		list_add_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
@@ -108,7 +108,7 @@ void fastcall activate_page(struct page 
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
diff -puN mm/vmscan.c~light-weight-counters-counter-conversion mm/vmscan.c
--- 25/mm/vmscan.c~light-weight-counters-counter-conversion	Fri Jun  9 15:01:52 2006
+++ 25-akpm/mm/vmscan.c	Fri Jun  9 15:03:12 2006
@@ -215,7 +215,7 @@ unsigned long shrink_slab(unsigned long 
 				break;
 			if (shrink_ret < nr_before)
 				ret += nr_before - shrink_ret;
-			mod_page_state(slabs_scanned, this_scan);
+			count_vm_events(SLABS_SCANNED, this_scan);
 			total_scan -= this_scan;
 
 			cond_resched();
@@ -569,7 +569,7 @@ keep:
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
 		__pagevec_release_nonlru(&freed_pvec);
-	mod_page_state(pgactivate, pgactivate);
+	count_vm_events(PGACTIVATE, pgactivate);
 	return nr_reclaimed;
 }
 
@@ -659,11 +659,11 @@ static unsigned long shrink_inactive_lis
 		nr_reclaimed += nr_freed;
 		local_irq_disable();
 		if (current_is_kswapd()) {
-			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-			__mod_page_state(kswapd_steal, nr_freed);
+			count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+			count_vm_events(KSWAPD_STEAL, nr_freed);
 		} else
-			__mod_page_state_zone(zone, pgscan_direct, nr_scan);
-		__mod_page_state_zone(zone, pgsteal, nr_freed);
+			count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+		count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
 		if (nr_taken == 0)
 			goto done;
@@ -840,11 +840,10 @@ static void shrink_active_list(unsigned 
 		}
 	}
 	zone->nr_active += pgmoved;
-	spin_unlock(&zone->lru_lock);
+	spin_unlock_irq(&zone->lru_lock);
 
-	__mod_page_state_zone(zone, pgrefill, pgscanned);
-	__mod_page_state(pgdeactivate, pgdeactivate);
-	local_irq_enable();
+	count_zone_vm_events(PGREFILL, zone, pgscanned);
+	count_vm_events(PGDEACTIVATE, pgdeactivate);
 
 	pagevec_release(&pvec);
 }
@@ -976,7 +975,7 @@ unsigned long try_to_free_pages(struct z
 		.swappiness = vm_swappiness,
 	};
 
-	inc_page_state(allocstall);
+	count_vm_event(ALLOCSTALL);
 
 	for (i = 0; zones[i] != NULL; i++) {
 		struct zone *zone = zones[i];
@@ -1074,7 +1073,7 @@ loop_again:
 	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode,
 
-	inc_page_state(pageoutrun);
+	count_vm_event(PAGEOUTRUN);
 
 	for (i = 0; i < pgdat->nr_zones; i++) {
 		struct zone *zone = pgdat->node_zones + i;
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-simplify-migrate_pages-tweaks.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-detailed-status-for-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-fixes.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
zoned-vm-counters-per-zone-counter-functionality.patch
zoned-vm-counters-per-zone-counter-functionality-tidy.patch
zoned-vm-counters-per-zone-counter-functionality-fix-fix.patch
zoned-vm-counters-include-per-zone-counters-in-proc-vmstat.patch
zoned-vm-counters-conversion-of-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter-fix.patch
zoned-vm-counters-use-per-zone-counters-to-remove-zone_reclaim_interval.patch
zoned-vm-counters-add-per-zone-counters-to-zone-node-and-global-vm-statistics.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-remove-unused-get_page_stat-functions.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-writeback-structure.patch
zoned-vm-stats-remove-nr_mapped-from-zone-reclaim.patch
zoned-vm-stats-add-nr_anon.patch
light-weight-counters-framework.patch
light-weight-counters-counter-conversion.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
swap_prefetch-conversion-of-nr_mapped-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_slab-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_dirty-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_writeback-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_unstable-to-per-zone-counter.patch
swap_prefetch-remove-unused-get_page_stat-functions.patch
zoned-vm-stats-nr_slab-is-accurate-fix-comment.patch
swap_prefetch-zoned-vm-stats-add-nr_anon.patch
reiser4-conversion-of-nr_dirty-to-per-zone-counter.patch
