+ numa-add-zone_to_nid-function.patch added to -mm tree

The patch titled

     NUMA: Add zone_to_nid function

has been added to the -mm tree.  Its filename is

     numa-add-zone_to_nid-function.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this.

------------------------------------------------------
Subject: NUMA: Add zone_to_nid function
From: Christoph Lameter <clameter@xxxxxxx>

There are many places where we need to determine the node of a zone.
Currently we use a difficult-to-read sequence of pointer dereferences
(zone->zone_pgdat->node_id).  Put that into an inline function and use it
throughout the VM.  Maybe we can find a way to optimize the lookup in the
future.
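To illustrate the pattern outside the kernel tree, here is a minimal
standalone userspace sketch (not kernel code): pg_data_t and struct zone
are pared down to just the fields needed to show the lookup, and the
helper mirrors the one added by this patch.

	#include <stdio.h>

	/* Pared-down stand-in for the kernel's per-node pg_data_t. */
	typedef struct pglist_data {
		int node_id;		/* NUMA node this pgdat describes */
	} pg_data_t;

	/* Pared-down stand-in for the kernel's struct zone. */
	struct zone {
		const char *name;
		pg_data_t *zone_pgdat;	/* back-pointer to the owning node */
	};

	/* The helper this patch introduces: hide the double dereference. */
	static inline unsigned long zone_to_nid(struct zone *zone)
	{
		return zone->zone_pgdat->node_id;
	}

	int main(void)
	{
		pg_data_t node1 = { .node_id = 1 };
		struct zone normal = { .name = "Normal", .zone_pgdat = &node1 };

		/* Callers previously open-coded zone->zone_pgdat->node_id. */
		printf("zone %s is on node %lu\n", normal.name,
		       zone_to_nid(&normal));
		return 0;
	}

The real conversions in the diff below follow the same shape: every
open-coded zone->zone_pgdat->node_id becomes zone_to_nid(zone).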

Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 arch/i386/mm/discontig.c |    2 +-
 arch/parisc/mm/init.c    |    2 +-
 include/linux/mm.h       |    7 ++++++-
 kernel/cpuset.c          |    4 ++--
 mm/hugetlb.c             |    2 +-
 mm/mempolicy.c           |    6 +++---
 mm/page_alloc.c          |    2 +-
 mm/vmscan.c              |    2 +-
 8 files changed, 16 insertions(+), 11 deletions(-)

diff -puN arch/i386/mm/discontig.c~numa-add-zone_to_nid-function arch/i386/mm/discontig.c
--- a/arch/i386/mm/discontig.c~numa-add-zone_to_nid-function
+++ a/arch/i386/mm/discontig.c
@@ -392,7 +392,7 @@ void __init set_highmem_pages_init(int b
 		zone_end_pfn = zone_start_pfn + zone->spanned_pages;
 
 		printk("Initializing %s for node %d (%08lx:%08lx)\n",
-				zone->name, zone->zone_pgdat->node_id,
+				zone->name, zone_to_nid(zone),
 				zone_start_pfn, zone_end_pfn);
 
 		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
diff -puN arch/parisc/mm/init.c~numa-add-zone_to_nid-function arch/parisc/mm/init.c
--- a/arch/parisc/mm/init.c~numa-add-zone_to_nid-function
+++ a/arch/parisc/mm/init.c
@@ -548,7 +548,7 @@ void show_mem(void)
 
 				printk("Zone list for zone %d on node %d: ", j, i);
 				for (k = 0; zl->zones[k] != NULL; k++) 
-					printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
+					printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
 				printk("\n");
 			}
 		}
diff -puN include/linux/mm.h~numa-add-zone_to_nid-function include/linux/mm.h
--- a/include/linux/mm.h~numa-add-zone_to_nid-function
+++ a/include/linux/mm.h
@@ -499,12 +499,17 @@ static inline struct zone *page_zone(str
 	return zone_table[page_zone_id(page)];
 }
 
+static inline unsigned long zone_to_nid(struct zone *zone)
+{
+	return zone->zone_pgdat->node_id;
+}
+
 static inline unsigned long page_to_nid(struct page *page)
 {
 	if (FLAGS_HAS_NODE)
 		return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 	else
-		return page_zone(page)->zone_pgdat->node_id;
+		return zone_to_nid(page_zone(page));
 }
 static inline unsigned long page_to_section(struct page *page)
 {
diff -puN kernel/cpuset.c~numa-add-zone_to_nid-function kernel/cpuset.c
--- a/kernel/cpuset.c~numa-add-zone_to_nid-function
+++ a/kernel/cpuset.c
@@ -2245,7 +2245,7 @@ int cpuset_zonelist_valid_mems_allowed(s
 	int i;
 
 	for (i = 0; zl->zones[i]; i++) {
-		int nid = zl->zones[i]->zone_pgdat->node_id;
+		int nid = zone_to_nid(zl->zones[i]);
 
 		if (node_isset(nid, current->mems_allowed))
 			return 1;
@@ -2318,7 +2318,7 @@ int __cpuset_zone_allowed(struct zone *z
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = z->zone_pgdat->node_id;
+	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
diff -puN mm/hugetlb.c~numa-add-zone_to_nid-function mm/hugetlb.c
--- a/mm/hugetlb.c~numa-add-zone_to_nid-function
+++ a/mm/hugetlb.c
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(st
 	struct zone **z;
 
 	for (z = zonelist->zones; *z; z++) {
-		nid = (*z)->zone_pgdat->node_id;
+		nid = zone_to_nid(*z);
 		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
 		    !list_empty(&hugepage_freelists[nid]))
 			break;
diff -puN mm/mempolicy.c~numa-add-zone_to_nid-function mm/mempolicy.c
--- a/mm/mempolicy.c~numa-add-zone_to_nid-function
+++ a/mm/mempolicy.c
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolic
 	switch (p->policy) {
 	case MPOL_BIND:
 		for (i = 0; p->v.zonelist->zones[i]; i++)
-			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
+			node_set(zone_to_nid(p->v.zonelist->zones[i]),
 				*nodes);
 		break;
 	case MPOL_DEFAULT:
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *pol
 		 * Follow bind policy behavior and start allocation at the
 		 * first node.
 		 */
-		return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+		return zone_to_nid(policy->v.zonelist->zones[0]);
 
 	case MPOL_PREFERRED:
 		if (policy->v.preferred_node >= 0)
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy
 
 		nodes_clear(nodes);
 		for (z = pol->v.zonelist->zones; *z; z++)
-			node_set((*z)->zone_pgdat->node_id, nodes);
+			node_set(zone_to_nid(*z), nodes);
 		nodes_remap(tmp, nodes, *mpolmask, *newmask);
 		nodes = tmp;
 
diff -puN mm/page_alloc.c~numa-add-zone_to_nid-function mm/page_alloc.c
--- a/mm/page_alloc.c~numa-add-zone_to_nid-function
+++ a/mm/page_alloc.c
@@ -1251,7 +1251,7 @@ unsigned int nr_free_pagecache_pages(voi
 #ifdef CONFIG_NUMA
 static void show_node(struct zone *zone)
 {
-	printk("Node %d ", zone->zone_pgdat->node_id);
+	printk("Node %ld ", zone_to_nid(zone));
 }
 #else
 #define show_node(zone)	do { } while (0)
diff -puN mm/vmscan.c~numa-add-zone_to_nid-function mm/vmscan.c
--- a/mm/vmscan.c~numa-add-zone_to_nid-function
+++ a/mm/vmscan.c
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_
 	 * over remote processors and spread off node memory allocations
 	 * as wide as possible.
 	 */
-	node_id = zone->zone_pgdat->node_id;
+	node_id = zone_to_nid(zone);
 	mask = node_to_cpumask(node_id);
 	if (!cpus_empty(mask) && node_id != numa_node_id())
 		return 0;
_

Patches currently in -mm which might be from clameter@xxxxxxx are

page-migration-replace-radix_tree_lookup_slot-with-radix_tree_lockup.patch
reduce-max_nr_zones-remove-two-strange-uses-of-max_nr_zones.patch
reduce-max_nr_zones-fix-max_nr_zones-array-initializations.patch
reduce-max_nr_zones-make-display-of-highmem-counters-conditional-on-config_highmem.patch
reduce-max_nr_zones-make-display-of-highmem-counters-conditional-on-config_highmem-tidy.patch
reduce-max_nr_zones-move-highmem-counters-into-highmemc-h.patch
reduce-max_nr_zones-move-highmem-counters-into-highmemc-h-fix.patch
reduce-max_nr_zones-page-allocator-zone_highmem-cleanup.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment.patch
reduce-max_nr_zones-use-enum-to-define-zones-reformat-and-comment-cleanup.patch
reduce-max_nr_zones-make-zone_dma32-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix.patch
reduce-max_nr_zones-make-zone_highmem-optional-fix-fix.patch
reduce-max_nr_zones-remove-display-of-counters-for-unconfigured-zones.patch
reduce-max_nr_zones-fix-i386-srat-check-for-max_nr_zones.patch
mempolicies-fix-policy_zone-check.patch
apply-type-enum-zone_type.patch
apply-type-enum-zone_type-fix.patch
linearly-index-zone-node_zonelists.patch
slab-respect-architecture-and-caller-mandated-alignment.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc.patch
slab-optimize-kmalloc_node-the-same-way-as-kmalloc-fix.patch
slab-extract-__kmem_cache_destroy-from-kmem_cache_destroy.patch
slab-do-not-panic-when-alloc_kmemlist-fails-and-slab-is-up.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore.patch
add-__gfp_thisnode-to-avoid-fallback-to-other-nodes-and-ignore-fix.patch
sys_move_pages-do-not-fall-back-to-other-nodes.patch
guarantee-that-the-uncached-allocator-gets-pages-on-the-correct.patch
cleanup-add-zone-pointer-to-get_page_from_freelist.patch
profiling-require-buffer-allocation-on-the-correct-node.patch
define-easier-to-handle-gfp_thisnode.patch
optimize-free_one_page.patch
do-not-check-unpopulated-zones-for-draining-and-counter.patch
extract-the-allocpercpu-functions-from-the-slab-allocator.patch
replace-min_unmapped_ratio-by-min_unmapped_pages-in-struct-zone.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable.patch
zone_reclaim-dynamic-slab-reclaim.patch
zone_reclaim-dynamic-slab-reclaim-tidy.patch
zone-reclaim-with-slab-avoid-unecessary-off-node-allocations.patch
hugepages-use-page_to_nid-rather-than-traversing-zone-pointers.patch
numa-add-zone_to_nid-function.patch
x86-implement-always-locked-bit-ops-for-memory-shared-with-an-smp-hypervisor.patch
zvc-support-nr_slab_reclaimable--nr_slab_unreclaimable-swap_prefetch.patch
reduce-max_nr_zones-swap_prefetch-remove-incorrect-use-of-zone_highmem.patch
readahead-state-based-method-aging-accounting-apply-type-enum-zone_type-readahead.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
