The patch titled
     light-weight-counters-framework fix
has been removed from the -mm tree.  Its filename is
     light-weight-counters-framework-fix.patch

This patch was dropped because it was folded into another patch

------------------------------------------------------
Subject: light-weight-counters-framework fix
From: Christoph Lameter <clameter@xxxxxxx>

Eventcounter fixups

- Add comment to all_vm_events
- remove get_global_events.
- fold foreign cpu events into our own.
- Remove useless exports

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/page-flags.h |    5 ---
 mm/page_alloc.c            |   54 +++++++++++++++++------------------
 2 files changed, 27 insertions(+), 32 deletions(-)

diff -puN include/linux/page-flags.h~light-weight-counters-framework-fix include/linux/page-flags.h
--- devel/include/linux/page-flags.h~light-weight-counters-framework-fix	2006-06-09 16:06:59.000000000 -0700
+++ devel-akpm/include/linux/page-flags.h	2006-06-09 16:06:59.000000000 -0700
@@ -137,10 +137,6 @@ struct vm_event_state {
 
 DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
-extern unsigned long get_global_vm_events(enum vm_event_item e);
-extern void sum_vm_events(unsigned long *r, cpumask_t *cpumask);
-extern void all_vm_events(unsigned long *r);
-
 static inline unsigned long get_cpu_vm_events(enum vm_event_item item)
 {
 	return __get_cpu_var(vm_event_states).event[item];
@@ -159,7 +155,6 @@ static inline void count_vm_events(enum 
 #else
 /* Disable counters */
 #define get_cpu_vm_events(e)		0L
-#define get_global_vm_events(e)	0L
 #define count_vm_event(e)		do { } while (0)
 #define count_vm_events(e,d)		do { } while (0)
 #endif
diff -puN mm/page_alloc.c~light-weight-counters-framework-fix mm/page_alloc.c
--- devel/mm/page_alloc.c~light-weight-counters-framework-fix	2006-06-09 16:06:59.000000000 -0700
+++ devel-akpm/mm/page_alloc.c	2006-06-09 16:11:14.000000000 -0700
@@ -1512,7 +1512,7 @@ static void show_node(struct zone *zone)
 #ifdef CONFIG_VM_EVENT_COUNTERS
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 
-void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 {
 	int cpu = 0;
 	int i;
@@ -1533,25 +1533,16 @@ void sum_vm_events(unsigned long *ret, c
 			ret[i] += this->event[i];
 	}
 }
-EXPORT_SYMBOL(sum_vm_events);
 
-void all_vm_events(unsigned long *ret)
+/*
+ * Accumulate the vm event counters across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+*/
+static void all_vm_events(unsigned long *ret)
 {
 	sum_vm_events(ret, &cpu_online_map);
 }
-EXPORT_SYMBOL(all_vm_events);
-
-unsigned long get_global_vm_events(enum vm_event_item e)
-{
-	unsigned long ret = 0;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		ret += per_cpu(vm_event_states, cpu).event[e];
-
-	return ret;
-}
-EXPORT_SYMBOL(get_global_vm_events);
 #endif
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
@@ -2783,6 +2774,25 @@ struct seq_operations vmstat_op = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Fold the foreign cpu states int our own.
+ *
+ * This is a pretty inconsistent thing to do since
+ * the event array is to count the events occurring
+ * for each processor. But we did this in the past
+ * so I guess that we need to continue.
+ */
+static void vm_events_fold_cpu(int cpu)
+{
+	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
+	int i;
+
+	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
+		count_vm_events(i, fold_state->event[i]);
+		fold_state->event[i] = 0;
+	}
+}
+
 static int page_alloc_cpu_notify(struct notifier_block *self,
 				 unsigned long action, void *hcpu)
 {
@@ -2794,17 +2804,7 @@ static int page_alloc_cpu_notify(struct 
 
 		local_irq_disable();
 		__drain_pages(cpu);
-
-		/* Add dead cpu's page_states to our own. */
-		dest = (unsigned long *)&__get_cpu_var(page_states);
-		src = (unsigned long *)&per_cpu(page_states, cpu);
-
-		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
-				i++) {
-			dest[i] += src[i];
-			src[i] = 0;
-		}
-
+		vm_events_fold_cpu(cpu);
 		local_irq_enable();
 		refresh_cpu_vm_stats(cpu);
 	}
_
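For readers following the conversion, a minimal standalone userspace sketch
(not kernel code) of the two operations this patch touches may help: summing
the per-CPU event arrays over the online CPUs, which is what the now-static
sum_vm_events()/all_vm_events() do, and folding a dead CPU's counts into a
surviving CPU on hotplug, which is what vm_events_fold_cpu() does.  NCPUS,
cpu_online[] and NR_EVENT_ITEMS below are invented for the illustration and
stand in for the kernel's per-CPU machinery and NR_VM_EVENT_ITEMS.

/*
 * Minimal userspace model of the per-CPU VM event counters above.
 * NCPUS, cpu_online[] and NR_EVENT_ITEMS are invented for this sketch.
 */
#include <stdio.h>

#define NCPUS		4
#define NR_EVENT_ITEMS	3

static unsigned long events[NCPUS][NR_EVENT_ITEMS];	/* one array per CPU */
static int cpu_online[NCPUS] = { 1, 1, 1, 1 };

/* Sum every event over the online CPUs - the role of all_vm_events(). */
static void sum_events(unsigned long *ret)
{
	int cpu, i;

	for (i = 0; i < NR_EVENT_ITEMS; i++)
		ret[i] = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!cpu_online[cpu])
			continue;
		for (i = 0; i < NR_EVENT_ITEMS; i++)
			ret[i] += events[cpu][i];
	}
}

/* Fold a dead CPU's counts into a survivor - the role of vm_events_fold_cpu(). */
static void fold_cpu(int dead, int into)
{
	int i;

	for (i = 0; i < NR_EVENT_ITEMS; i++) {
		events[into][i] += events[dead][i];
		events[dead][i] = 0;
	}
}

int main(void)
{
	unsigned long total[NR_EVENT_ITEMS];

	events[0][0] = 10;	/* CPU 0 saw 10 of event 0 */
	events[2][0] = 5;	/* CPU 2 saw 5 of event 0  */

	cpu_online[2] = 0;	/* CPU 2 goes offline ...          */
	fold_cpu(2, 0);		/* ... so its counts move to CPU 0 */

	sum_events(total);
	printf("event 0 total: %lu\n", total[0]);	/* 15, nothing lost */
	return 0;
}

As in the kernel code, a total taken while other CPUs are still counting is
only approximate; the fold merely ensures that an offlined CPU's events are
not dropped from the sum over online CPUs.
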
Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-make-do_swap_page-redo-the-fault.patch
slab-extract-cache_free_alien-from-__cache_free.patch
migration-remove-unnecessary-pageswapcache-checks.patch
page-migration-cleanup-rename-ignrefs-to-migration.patch
page-migration-cleanup-group-functions.patch
page-migration-cleanup-remove-useless-definitions.patch
page-migration-cleanup-drop-nr_refs-in-remove_references.patch
page-migration-cleanup-extract-try_to_unmap-from-migration-functions.patch
page-migration-cleanup-pass-mapping-to-migration-functions.patch
page-migration-cleanup-move-fallback-handling-into-special-function.patch
swapless-pm-add-r-w-migration-entries.patch
swapless-page-migration-rip-out-swap-based-logic.patch
swapless-page-migration-modify-core-logic.patch
more-page-migration-do-not-inc-dec-rss-counters.patch
more-page-migration-use-migration-entries-for-file-pages.patch
page-migration-update-documentation.patch
mm-remove-vm_locked-before-remap_pfn_range-and-drop-vm_shm.patch
page-migration-simplify-migrate_pages.patch
page-migration-simplify-migrate_pages-tweaks.patch
page-migration-handle-freeing-of-pages-in-migrate_pages.patch
page-migration-use-allocator-function-for-migrate_pages.patch
page-migration-support-moving-of-individual-pages.patch
page-migration-detailed-status-for-moving-of-individual-pages.patch
page-migration-support-moving-of-individual-pages-fixes.patch
page-migration-support-moving-of-individual-pages-x86_64-support.patch
page-migration-support-moving-of-individual-pages-x86-support.patch
page-migration-support-a-vma-migration-function.patch
allow-migration-of-mlocked-pages.patch
zoned-vm-counters-per-zone-counter-functionality.patch
zoned-vm-counters-per-zone-counter-functionality-tidy.patch
zoned-vm-counters-per-zone-counter-functionality-fix-fix.patch
zoned-vm-counters-include-per-zone-counters-in-proc-vmstat.patch
zoned-vm-counters-conversion-of-nr_mapped-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagecache-to-per-zone-counter-fix.patch
zoned-vm-counters-use-per-zone-counters-to-remove-zone_reclaim_interval.patch
zoned-vm-counters-add-per-zone-counters-to-zone-node-and-global-vm-statistics.patch
zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_pagetable-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_dirty-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_writeback-to-per-zone-counter.patch
zoned-vm-counters-conversion-of-nr_unstable-to-per-zone-counter.patch
zoned-vm-counters-remove-unused-get_page_stat-functions.patch
zoned-vm-counters-conversion-of-nr_bounce-to-per-zone-counter.patch
zoned-vm-counters-remove-useless-writeback-structure.patch
zoned-vm-stats-remove-nr_mapped-from-zone-reclaim.patch
zoned-vm-stats-add-nr_anon.patch
light-weight-counters-framework.patch
light-weight-counters-framework-fix.patch
light-weight-counters-counter-conversion.patch
cpuset-remove-extra-cpuset_zone_allowed-check-in-__alloc_pages.patch
swap_prefetch-conversion-of-nr_mapped-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_slab-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_dirty-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_writeback-to-per-zone-counter.patch
swap_prefetch-conversion-of-nr_unstable-to-per-zone-counter.patch
swap_prefetch-remove-unused-get_page_stat-functions.patch
zoned-vm-stats-nr_slab-is-accurate-fix-comment.patch
swap_prefetch-zoned-vm-stats-add-nr_anon.patch
reiser4-conversion-of-nr_dirty-to-per-zone-counter.patch
-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html