- zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup.patch removed from -mm tree

The patch titled

     Inline counters for single processor configurations

has been removed from the -mm tree.  Its filename is

     zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup.patch

This patch was dropped because it was folded into zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation.patch

------------------------------------------------------
Subject: Inline counters for single processor configurations
From: Christoph Lameter <clameter@xxxxxxx>


ZVC: inline counters handling on single processor systems.

Since we always use atomic operations to update counters on single
processor systems, we do not need to distinguish between the case in
which interrupts are disabled and the case in which they are not.

Each update is then only two atomic adds. Code size shrinks if we
switch to inlining instead of providing explicit functions in vmstat.c.

Kernel image size with inlined ZVC counters:
-rw-r--r-- 1 root root 1865626 Jun 27 03:06 vmlinuz-2.6.17-mm3

Kernel image size with out-of-line function calls:
-rw-r--r-- 1 root root 1865792 Jun 27 03:21 /boot/vmlinuz-2.6.17-mm3

Move zone_page_state_add() and the single processor functions from
vmstat.c into vmstat.h.  Remove all interrupt disabling/enabling in the
single processor case.
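
For illustration only (an editorial sketch, not part of the patch): the
stand-alone program below mimics what the UP path reduces to, using C11
atomics in place of the kernel's atomic_long_t/atomic_long_add() and a
simplified stand-in for struct zone and the stat item enum.  It only
shows that each update is two atomic adds, with no interrupt disabling.

#include <stdatomic.h>
#include <stdio.h>

enum zone_stat_item { NR_MAPPED, NR_VM_ZONE_STAT_ITEMS };

struct zone {
	atomic_long vm_stat[NR_VM_ZONE_STAT_ITEMS];
};

static atomic_long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* global counters */
static struct zone z;					/* one fake zone */

/*
 * On UP a counter update is just two atomic adds: one to the per-zone
 * counter and one to the global counter.  Because the adds themselves
 * are atomic, nothing needs to disable interrupts around them.
 */
static inline void zone_page_state_add(long x, struct zone *zone,
				       enum zone_stat_item item)
{
	atomic_fetch_add(&zone->vm_stat[item], x);
	atomic_fetch_add(&vm_stat[item], x);
}

int main(void)
{
	zone_page_state_add(1, &z, NR_MAPPED);	/* like inc_zone_page_state */
	zone_page_state_add(-1, &z, NR_MAPPED);	/* like dec_zone_page_state */

	printf("global NR_MAPPED = %ld\n", atomic_load(&vm_stat[NR_MAPPED]));
	return 0;
}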

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/vmstat.h |   59 ++++++++++++++++++++++++++++++------
 mm/vmstat.c            |   63 ---------------------------------------
 2 files changed, 50 insertions(+), 72 deletions(-)

diff -puN include/linux/vmstat.h~zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup include/linux/vmstat.h
--- 25/include/linux/vmstat.h~zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup	Tue Jun 27 15:22:38 2006
+++ 25-akpm/include/linux/vmstat.h	Tue Jun 27 15:22:38 2006
@@ -142,6 +142,13 @@ DECLARE_PER_CPU(struct page_state, page_
  */
 extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
+static inline void zone_page_state_add(long x, struct zone *zone,
+				 enum zone_stat_item item)
+{
+	atomic_long_add(x, &zone->vm_stat[item]);
+	atomic_long_add(x, &vm_stat[item]);
+}
+
 static inline unsigned long global_page_state(enum zone_stat_item item)
 {
 	long x = atomic_long_read(&vm_stat[item]);
@@ -190,19 +197,11 @@ static inline unsigned long node_page_st
 #define node_page_state(node, item) global_page_state(item)
 #endif
 
-void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
-void __inc_zone_page_state(struct page *, enum zone_stat_item);
-void __dec_zone_page_state(struct page *, enum zone_stat_item);
-
 #define __add_zone_page_state(__z, __i, __d)	\
 		__mod_zone_page_state(__z, __i, __d)
 #define __sub_zone_page_state(__z, __i, __d)	\
 		__mod_zone_page_state(__z, __i,-(__d))
 
-void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
-void inc_zone_page_state(struct page *, enum zone_stat_item);
-void dec_zone_page_state(struct page *, enum zone_stat_item);
-
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
@@ -212,9 +211,51 @@ static inline void zap_zone_vm_stats(str
 }
 
 #ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+
 void refresh_cpu_vm_stats(int);
 void refresh_vm_stats(void);
-#else
+
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+			enum zone_stat_item item, int delta)
+{
+	zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	zone_page_state_add(1, page_zone(page), item);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+			enum zone_stat_item item)
+{
+	zone_page_state_add(-1, page_zone(page), item);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
 static inline void refresh_cpu_vm_stats(int cpu) { }
 static inline void refresh_vm_stats(void) { }
 #endif
diff -puN mm/vmstat.c~zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup mm/vmstat.c
--- 25/mm/vmstat.c~zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup	Tue Jun 27 15:22:38 2006
+++ 25-akpm/mm/vmstat.c	Tue Jun 27 15:22:38 2006
@@ -155,13 +155,6 @@ void get_zone_counts(unsigned long *acti
  */
 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
-static inline void zone_page_state_add(long x, struct zone *zone,
-				 enum zone_stat_item item)
-{
-	atomic_long_add(x, &zone->vm_stat[item]);
-	atomic_long_add(x, &vm_stat[item]);
-}
-
 #ifdef CONFIG_SMP
 
 #define STAT_THRESHOLD 32
@@ -343,62 +336,6 @@ void refresh_vm_stats(void)
 }
 EXPORT_SYMBOL(refresh_vm_stats);
 
-#else /* CONFIG_SMP */
-
-/*
- * We do not maintain differentials in a single processor configuration.
- * The functions directly modify the zone and global counters.
- */
-
-void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-				int delta)
-{
-	zone_page_state_add(delta, zone, item);
-}
-EXPORT_SYMBOL(__mod_zone_page_state);
-
-void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
- 				int delta)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	zone_page_state_add(delta, zone, item);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_zone_page_state);
-
-void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	zone_page_state_add(1, page_zone(page), item);
-}
-EXPORT_SYMBOL(__inc_zone_page_state);
-
-void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	zone_page_state_add(-1, page_zone(page), item);
-}
-EXPORT_SYMBOL(__dec_zone_page_state);
-
-void inc_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	zone_page_state_add(1, page_zone(page), item);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(inc_zone_page_state);
-
-void dec_zone_page_state(struct page *page, enum zone_stat_item item)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	zone_page_state_add( -1, page_zone(page), item);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(dec_zone_page_state);
 #endif
 
 #ifdef CONFIG_PROC_FS
_

Patches currently in -mm which might be from clameter@xxxxxxx are

origin.patch
usb-remove-empty-destructor-from-drivers-usb-mon-mon_textc.patch
zoned-vm-counters-create-vmstatc-h-from-page_allocc-h.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup.patch
zoned-vm-counters-basic-zvc-zoned-vm-counter-implementation-speedup-fix.patch
zoned-vm-counters-convert-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure.patch
zoned-vm-counters-remove-nr_file_mapped-from-scan-control-structure-fix.patch
zoned-vm-counters-split-nr_anon_pages-off-from-nr_file_mapped.patch
zoned-vm-counters-zone_reclaim-remove-proc-sys-vm-zone_reclaim_interval.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix-2.patch
zoned-vm-counters-conversion-of-nr_pagetables-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter-fix-2.patch
zoned-vm-counters-remove-useless-struct-wbs.patch
use-zoned-vm-counters-for-numa-statistics-v3.patch
light-weight-event-counters-v5.patch
slab-consolidate-code-to-free-slabs-from-freelist.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
