- reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment.patch removed from -mm tree

The patch titled

     reduce MAX_NR_ZONES: use enum to define zones, reformat and comment

has been removed from the -mm tree.  Its filename is

     reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment.patch

This patch was dropped because it was folded into reduce-max_nr_zones-page-allocator-zone_highmem-cleanup.patch

------------------------------------------------------
Subject: reduce MAX_NR_ZONES: use enum to define zones, reformat and comment
From: Christoph Lameter <clameter@xxxxxxx>

Use an enum for the zones and reformat the zone-dependent information.

Add comments explaining the use of the zones and add a zones_t type for zone
numbers.

Line up information that will be #ifdef'd by the following patches.
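
For illustration only, a minimal sketch of what the typed zone index looks
like in a caller once the enum is in place.  It assumes nothing beyond what
the diff below introduces plus existing mmzone.h definitions; the helper
walk_node_zones() is hypothetical and not part of this patch:

	#include <linux/mmzone.h>

	/* Iterate over a node's zones using the zones_t index type. */
	static void walk_node_zones(struct pglist_data *pgdat)
	{
		zones_t i;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;

			if (!populated_zone(zone))
				continue;
			/* per-zone work would go here */
		}
	}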

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/mm.h     |    7 ++--
 include/linux/mmzone.h |   66 +++++++++++++++++++++++++++------------
 mm/page_alloc.c        |   23 ++++++++++---
 3 files changed, 68 insertions(+), 28 deletions(-)

diff -puN include/linux/mm.h~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment include/linux/mm.h
--- a/include/linux/mm.h~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment
+++ a/include/linux/mm.h
@@ -470,7 +470,7 @@ void split_page(struct page *page, unsig
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline zones_t page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -499,11 +499,12 @@ static inline unsigned long page_to_sect
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, zones_t zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(stru
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, zones_t zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
diff -puN include/linux/mmzone.h~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment include/linux/mmzone.h
--- a/include/linux/mmzone.h~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment
+++ a/include/linux/mmzone.h
@@ -88,14 +88,53 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+typedef enum {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 * 			<16M.
+	 */
+	ZONE_DMA,
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
+	MAX_NR_ZONES
+} zones_t;
 
+#define	ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
 /*
  * When a memory allocation must conform to specific limitations (such
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
 /* #define GFP_ZONETYPES       (GFP_ZONEMASK + 1) */           /* Non-loner */
 #define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)            /* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB 	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
@@ -266,7 +295,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct 
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(zones_t idx)
 {
 	return (idx == ZONE_HIGHMEM);
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(zones_t idx)
 {
 	return (idx == ZONE_NORMAL);
 }
diff -puN mm/page_alloc.c~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment mm/page_alloc.c
--- a/mm/page_alloc.c~reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment
+++ a/mm/page_alloc.c
@@ -68,7 +68,11 @@ static void __free_pages_ok(struct page 
  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+	 256,
+	 256,
+	 32
+};
 
 EXPORT_SYMBOL(totalram_pages);
 
@@ -79,7 +83,13 @@ EXPORT_SYMBOL(totalram_pages);
 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = {
+	 "DMA",
+	 "DMA32",
+	 "Normal",
+	 "HighMem"
+};
+
 int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
@@ -1487,7 +1497,8 @@ static void __meminit build_zonelists(pg
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
 {
-	int i, j, k, node, local_node;
+	int i, node, local_node;
+	zones_t k, j;
 
 	local_node = pgdat->node_id;
 	for (i = 0; i < GFP_ZONETYPES; i++) {
@@ -1675,8 +1686,8 @@ void zone_init_free_lists(struct pglist_
 }
 
 #define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-		unsigned long size)
+void zonetable_add(struct zone *zone, int nid, zones_t zid,
+		unsigned long pfn, unsigned long size)
 {
 	unsigned long snum = pfn_to_section_nr(pfn);
 	unsigned long end = pfn_to_section_nr(pfn + size);
@@ -1958,7 +1969,7 @@ __meminit int init_currently_empty_zone(
 static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
-	unsigned long j;
+	zones_t j;
 	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 	int ret;
_
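
The "/* Sync this with ZONES_SHIFT */" comment removed above was the only
reminder that ZONES_SHIFT must stay large enough to encode MAX_NR_ZONES.  A
hedged sketch of how that invariant could be checked at compile time -- not
part of this patch, just an illustration using the kernel's BUILD_BUG_ON()
from linux/kernel.h:

	#include <linux/kernel.h>
	#include <linux/mmzone.h>

	/*
	 * MAX_NR_ZONES is now derived from the enum, but ZONES_SHIFT is
	 * still a hand-maintained constant used to pack the zone number
	 * into page->flags.  Fail the build if the two ever drift apart.
	 */
	static inline void zones_shift_sanity_check(void)
	{
		BUILD_BUG_ON(MAX_NR_ZONES > (1 << ZONES_SHIFT));
	}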

Patches currently in -mm which might be from clameter@xxxxxxx are

origin.patch
fix-longstanding-load-balancing-bug-in-the-scheduler.patch
reduce-max_nr_zones-remove-two-strange-uses-of-max_nr_zones.patch
reduce-max_nr_zones-fix-max_nr_zones-array-initializations.patch
reduce-max_nr_zones-make-display-of-highmem-counters-conditional-on-config_highmem.patch
reduce-max_nr_zones-move-highmem-counters-into-highmemc-h.patch
reduce-max_nr_zones-page-allocator-zone_highmem-cleanup.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment-cleanup.patch
reduce-max_nr_zones-make-zone_dma32-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix-fix.patch
reduce-max_nr_zones-remove-display-of-counters-for-unconfigured-zones.patch
reduce-max_nr_zones-fix-i386-srat-check-for-max_nr_zones.patch
mempolicies-fix-policy_zone-check.patch
apply-type-enum-zone_type.patch
apply-type-enum-zone_type-fix.patch
linearly-index-zone-node_zonelists.patch
slab-respect-architecture-and-caller-mandated-alignment.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc-fix.patch
slab-extract-__kmem_cache_destroy-from-kmem_cache_destroy.patch
slab-do-not-panic-when-alloc_kmemlist-fails-and-slab-is-up.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore-fix.patch
sys_move_pages-do-not-fall-back-to-other-nodes.patch
guarantee-that-the-uncached-allocator-gets-pages-on-the-correct.patch
cleanup-add-zone-pointer-to-get_page_from_freelist.patch
profiling-require-buffer-allocation-on-the-correct-node.patch
define-easier-to-handle-gfp_thisnode.patch
optimize-free_one_page.patch
do-not-check-unpopulated-zones-for-draining-and-counter.patch
extract-the-allocpercpu-functions-from-the-slab-allocator.patch
replace-min_unmapped_ratio-by-min_unmapped_pages-in-struct-zone.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable.patch
zone_reclaim-dynamic-slab-reclaim.patch
zone_reclaim-dynamic-slab-reclaim-tidy.patch
zone-reclaim-with-slab-avoid-unecessary-off-node-allocations.patch
hugepages-use-page_to_nid-rather-than-traversing-zone-pointers.patch
numa-add-zone_to_nid-function.patch
numa-add-zone_to_nid-function-update.patch
slab-fix-kmalloc_node-applying-memory-policies-if-nodeid-==-numa_node_id.patch
add-numa_build-definition-in-kernelh-to-avoid-ifdef.patch
disable-gfp_thisnode-in-the-non-numa-case.patch
gfp_thisnode-for-the-slab-allocator-v2.patch
gfp_thisnode-for-the-slab-allocator-v2-fix-3.patch
add-node-to-zone-for-the-numa-case.patch
add-node-to-zone-for-the-numa-case-fix.patch
get-rid-of-zone_table.patch
get-rid-of-zone_table-fix.patch
do-not-allocate-pagesets-for-unpopulated-zones.patch
zone_statistics-use-hot-node-instead-of-cold-zone_pgdat.patch
deal-with-cases-of-zone_dma-meaning-the-first-zone.patch
introduce-config_zone_dma.patch
optional-zone_dma-in-the-vm.patch
optional-zone_dma-for-i386.patch
optional-zone_dma-for-x86_64.patch
optional-zone_dma-for-ia64.patch
remove-zone_dma-remains-from-parisc.patch
remove-zone_dma-remains-from-sh-sh64.patch
radix-tree-rcu-lockless-readside.patch
remove-zone_dma-remains-from-avr32.patch
x86-implement-always-locked-bit-ops-for-memory-shared-with-an-smp-hypervisor.patch
scheduler-numa-aware-placement-of-sched_group_allnodes.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable-swap_prefetch.patch
reduce-max_nr_zones-swap_prefetch-remove-incorrect-use-of-zone_highmem.patch
numa-add-zone_to_nid-function-swap_prefetch.patch
readahead-state-based-method-aging-accounting-apply-type-enum-zone_type-readahead.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
