+ optional-zone_dma-in-the-vm.patch added to -mm tree

The patch titled

     optional ZONE_DMA: optional ZONE_DMA in the VM

has been added to the -mm tree.  Its filename is

     optional-zone_dma-in-the-vm.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

------------------------------------------------------
Subject: optional ZONE_DMA: optional ZONE_DMA in the VM
From: Christoph Lameter <clameter@xxxxxxx>

Make ZONE_DMA optional in core code.

- #ifdef all code for ZONE_DMA and related definitions, following the
  example set by ZONE_DMA32 and ZONE_HIGHMEM.  With CONFIG_ZONE_DMA
  disabled, gfp_zone() no longer checks __GFP_DMA, so such allocations are
  satisfied from ZONE_NORMAL (see the sketch after this list).

- Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32, only ZONE_NORMAL is left,
  so no bits are needed to encode the zone and ZONES_SHIFT drops to 0.

- Modify the VM statistics so that the per-zone counters and event items
  are indexed correctly without a DMA zone (the index arithmetic is
  illustrated after the patch).

- Modify slab to not create DMA slabs if there is no ZONE_DMA.
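
A minimal user-space sketch of the resulting allocation fall-through
(illustrative only, not kernel code; the flag value and the reduced zone
enum are stand-ins for the real definitions): with CONFIG_ZONE_DMA unset,
a __GFP_DMA request resolves to ZONE_NORMAL.

/* zone-fallthrough.c: toggle CONFIG_ZONE_DMA to compare the two layouts */
#include <stdio.h>

/* #define CONFIG_ZONE_DMA 1 */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
	ZONE_NORMAL,
	MAX_NR_ZONES
};

#define __GFP_DMA	0x01u

static enum zone_type gfp_zone(unsigned int flags)
{
#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return ZONE_DMA;
#endif
	return ZONE_NORMAL;		/* __GFP_DMA is simply ignored */
}

int main(void)
{
	printf("MAX_NR_ZONES=%d gfp_zone(__GFP_DMA)=%d\n",
		MAX_NR_ZONES, gfp_zone(__GFP_DMA));
	return 0;
}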

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Cc: Andi Kleen <ak@xxxxxxx>
Cc: "Luck, Tony" <tony.luck@xxxxxxxxx>
Cc: Kyle McMartin <kyle@xxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxx>
Cc: James Bottomley <James.Bottomley@xxxxxxxxxxxx>
Cc: Paul Mundt <lethal@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/gfp.h    |    2 ++
 include/linux/mmzone.h |   10 ++++++++++
 include/linux/slab.h   |    4 ++++
 include/linux/vmstat.h |   17 +++++++++++++----
 mm/page_alloc.c        |    4 ++++
 mm/slab.c              |    5 ++++-
 mm/vmstat.c            |    8 +++++++-
 7 files changed, 44 insertions(+), 6 deletions(-)

diff -puN include/linux/gfp.h~optional-zone_dma-in-the-vm include/linux/gfp.h
--- a/include/linux/gfp.h~optional-zone_dma-in-the-vm
+++ a/include/linux/gfp.h
@@ -85,8 +85,10 @@ struct vm_area_struct;
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
+#ifdef CONFIG_ZONE_DMA
 	if (flags & __GFP_DMA)
 		return ZONE_DMA;
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	if (flags & __GFP_DMA32)
 		return ZONE_DMA32;
diff -puN include/linux/mmzone.h~optional-zone_dma-in-the-vm include/linux/mmzone.h
--- a/include/linux/mmzone.h~optional-zone_dma-in-the-vm
+++ a/include/linux/mmzone.h
@@ -91,6 +91,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * ZONE_DMA is used when there are devices that are not able
 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -111,6 +112,7 @@ enum zone_type {
 	 * 			<16M.
 	 */
 	ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	/*
 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -148,7 +150,11 @@ enum zone_type {
  */
 
 #if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+#if !defined(CONFIG_ZONE_DMA)
+#define ZONES_SHIFT 0
+#else
 #define ZONES_SHIFT 1
+#endif
 #else
 #define ZONES_SHIFT 2
 #endif
@@ -448,7 +454,11 @@ static inline int is_dma32(struct zone *
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+	return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
diff -puN include/linux/slab.h~optional-zone_dma-in-the-vm include/linux/slab.h
--- a/include/linux/slab.h~optional-zone_dma-in-the-vm
+++ a/include/linux/slab.h
@@ -72,7 +72,11 @@ extern const char *kmem_cache_name(kmem_
 struct cache_sizes {
 	size_t		 cs_size;
 	kmem_cache_t	*cs_cachep;
+#ifdef CONFIG_ZONE_DMA
 	kmem_cache_t	*cs_dmacachep;
+#else
+#define cs_dmacachep cs_cachep
+#endif
 };
 extern struct cache_sizes malloc_sizes[];
 
diff -puN include/linux/vmstat.h~optional-zone_dma-in-the-vm include/linux/vmstat.h
--- a/include/linux/vmstat.h~optional-zone_dma-in-the-vm
+++ a/include/linux/vmstat.h
@@ -18,6 +18,12 @@
  * generated will simply be the increment of a global address.
  */
 
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define DMA32_ZONE(xx) xx##_DMA32,
 #else
@@ -30,7 +36,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
@@ -89,7 +95,8 @@ extern void vm_events_fold_cpu(int cpu);
 #endif /* CONFIG_VM_EVENT_COUNTERS */
 
 #define __count_zone_vm_events(item, zone, delta) \
-			__count_vm_events(item##_DMA + zone_idx(zone), delta)
+		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+		zone_idx(zone), delta)
 
 /*
  * Zone based page accounting with per cpu differentials.
@@ -136,14 +143,16 @@ static inline unsigned long node_page_st
 	struct zone *zones = NODE_DATA(node)->node_zones;
 
 	return
+#ifdef CONFIG_ZONE_DMA
+		zone_page_state(&zones[ZONE_DMA], item) +
+#endif
 #ifdef CONFIG_ZONE_DMA32
 		zone_page_state(&zones[ZONE_DMA32], item) +
 #endif
-		zone_page_state(&zones[ZONE_NORMAL], item) +
 #ifdef CONFIG_HIGHMEM
 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-		zone_page_state(&zones[ZONE_DMA], item);
+		zone_page_state(&zones[ZONE_NORMAL], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
diff -puN mm/page_alloc.c~optional-zone_dma-in-the-vm mm/page_alloc.c
--- a/mm/page_alloc.c~optional-zone_dma-in-the-vm
+++ a/mm/page_alloc.c
@@ -71,7 +71,9 @@ static void __free_pages_ok(struct page 
  * don't need any ZONE_NORMAL reservation
  */
 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+#ifdef CONFIG_ZONE_DMA
 	 256,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	 256,
 #endif
@@ -83,7 +85,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_Z
 EXPORT_SYMBOL(totalram_pages);
 
 static char *zone_names[MAX_NR_ZONES] = {
+#ifdef CONFIG_ZONE_DMA
 	 "DMA",
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	 "DMA32",
 #endif
diff -puN mm/slab.c~optional-zone_dma-in-the-vm mm/slab.c
--- a/mm/slab.c~optional-zone_dma-in-the-vm
+++ a/mm/slab.c
@@ -1451,13 +1451,14 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
-
+#ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
 					ARCH_KMALLOC_MINALIGN,
 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
 						SLAB_PANIC,
 					NULL, NULL);
+#endif
 		sizes++;
 		names++;
 	}
@@ -2269,8 +2270,10 @@ kmem_cache_create (const char *name, siz
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
+#ifdef CONFIG_ZONE_DMA
 	if (flags & SLAB_CACHE_DMA)
 		cachep->gfpflags |= GFP_DMA;
+#endif
 	cachep->buffer_size = size;
 
 	if (flags & CFLGS_OFF_SLAB) {
diff -puN mm/vmstat.c~optional-zone_dma-in-the-vm mm/vmstat.c
--- a/mm/vmstat.c~optional-zone_dma-in-the-vm
+++ a/mm/vmstat.c
@@ -438,6 +438,12 @@ struct seq_operations fragmentation_op =
 	.show	= frag_show,
 };
 
+#ifdef CONFIG_ZONE_DMA
+#define TEXT_FOR_DMA(xx) xx "_dma",
+#else
+#define TEXT_FOR_DMA(xx)
+#endif
+
 #ifdef CONFIG_ZONE_DMA32
 #define TEXT_FOR_DMA32(xx) xx "_dma32",
 #else
@@ -450,7 +456,7 @@ struct seq_operations fragmentation_op =
 #define TEXT_FOR_HIGHMEM(xx)
 #endif
 
-#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
+#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
 					TEXT_FOR_HIGHMEM(xx)
 
 static char *vmstat_text[] = {
_
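
For reference, a stand-alone illustration of the new per-zone event
indexing in __count_zone_vm_events() (not kernel code; the enums below
are hand-expanded stand-ins for enum zone_type and the items generated by
FOR_ALL_ZONES()): because the event items follow the zone_type layout,
anchoring at item##_NORMAL - ZONE_NORMAL and adding zone_idx(zone) lands
on the right item whether or not the optional DMA zone is compiled in.

/* vmstat-index.c: toggle CONFIG_ZONE_DMA and the asserts still hold */
#include <assert.h>
#include <stdio.h>

/* #define CONFIG_ZONE_DMA 1 */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	ZONE_DMA,
#endif
	ZONE_NORMAL,
	MAX_NR_ZONES
};

enum vm_event_item {
	PGPGIN, PGPGOUT,	/* non-zone items precede the per-zone ones */
#ifdef CONFIG_ZONE_DMA
	PGALLOC_DMA,
#endif
	PGALLOC_NORMAL,
};

/* the patched mapping: anchor at _NORMAL, offset by the zone's index */
#define count_index(zone_idx)	(PGALLOC_NORMAL - ZONE_NORMAL + (zone_idx))

int main(void)
{
#ifdef CONFIG_ZONE_DMA
	assert(count_index(ZONE_DMA) == PGALLOC_DMA);
#endif
	assert(count_index(ZONE_NORMAL) == PGALLOC_NORMAL);
	printf("per-zone event indexing is consistent for %d zone(s)\n",
		MAX_NR_ZONES);
	return 0;
}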

Patches currently in -mm which might be from clameter@xxxxxxx are

fix-longstanding-load-balancing-bug-in-the-scheduler.patch
cleanup-radix_tree_derefreplace_slot-calling-conventions-warning-fixes.patch
reduce-max_nr_zones-remove-two-strange-uses-of-max_nr_zones.patch
reduce-max_nr_zones-fix-max_nr_zones-array-initializations.patch
reduce-max_nr_zones-make-display-of-highmem-counters-conditional-on-config_highmem.patch
reduce-max_nr_zones-make-display-of-highmem-counters-conditional-on-config_highmem-tidy.patch
reduce-max_nr_zones-move-highmem-counters-into-highmemc-h.patch
reduce-max_nr_zones-move-highmem-counters-into-highmemc-h-fix.patch
reduce-max_nr_zones-page-allocator-zone_highmem-cleanup.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment-cleanup.patch
reduce-max_nr_zones-make-zone_dma32-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix-fix.patch
reduce-max_nr_zones-remove-display-of-counters-for-unconfigured-zones.patch
reduce-max_nr_zones-fix-i386-srat-check-for-max_nr_zones.patch
mempolicies-fix-policy_zone-check.patch
apply-type-enum-zone_type.patch
apply-type-enum-zone_type-fix.patch
linearly-index-zone-node_zonelists.patch
slab-respect-architecture-and-caller-mandated-alignment.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc-fix.patch
slab-extract-__kmem_cache_destroy-from-kmem_cache_destroy.patch
slab-do-not-panic-when-alloc_kmemlist-fails-and-slab-is-up.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore-fix.patch
sys_move_pages-do-not-fall-back-to-other-nodes.patch
guarantee-that-the-uncached-allocator-gets-pages-on-the-correct.patch
cleanup-add-zone-pointer-to-get_page_from_freelist.patch
profiling-require-buffer-allocation-on-the-correct-node.patch
define-easier-to-handle-gfp_thisnode.patch
optimize-free_one_page.patch
do-not-check-unpopulated-zones-for-draining-and-counter.patch
extract-the-allocpercpu-functions-from-the-slab-allocator.patch
replace-min_unmapped_ratio-by-min_unmapped_pages-in-struct-zone.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable.patch
zone_reclaim-dynamic-slab-reclaim.patch
zone_reclaim-dynamic-slab-reclaim-tidy.patch
zone-reclaim-with-slab-avoid-unecessary-off-node-allocations.patch
hugepages-use-page_to_nid-rather-than-traversing-zone-pointers.patch
numa-add-zone_to_nid-function.patch
numa-add-zone_to_nid-function-update.patch
slab-fix-kmalloc_node-applying-memory-policies-if-nodeid-==-numa_node_id.patch
add-numa_build-definition-in-kernelh-to-avoid-ifdef.patch
disable-gfp_thisnode-in-the-non-numa-case.patch
gfp_thisnode-for-the-slab-allocator-v2.patch
add-node-to-zone-for-the-numa-case.patch
add-node-to-zone-for-the-numa-case-fix.patch
get-rid-of-zone_table.patch
get-rid-of-zone_table-fix.patch
do-not-allocate-pagesets-for-unpopulated-zones.patch
zone_statistics-use-hot-node-instead-of-cold-zone_pgdat.patch
deal-with-cases-of-zone_dma-meaning-the-first-zone.patch
introduce-config_zone_dma.patch
optional-zone_dma-in-the-vm.patch
optional-zone_dma-for-i386.patch
optional-zone_dma-for-x86_64.patch
optional-zone_dma-for-ia64.patch
remove-zone_dma-remains-from-parisc.patch
remove-zone_dma-remains-from-sh-sh64.patch
x86-implement-always-locked-bit-ops-for-memory-shared-with-an-smp-hypervisor.patch
scheduler-numa-aware-placement-of-sched_group_allnodes.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable-swap_prefetch.patch
reduce-max_nr_zones-swap_prefetch-remove-incorrect-use-of-zone_highmem.patch
numa-add-zone_to_nid-function-swap_prefetch.patch
readahead-state-based-method-aging-accounting-apply-type-enum-zone_type-readahead.patch
