+ mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask.patch added to -mm tree

The patch titled
     mm: introduce node_zonelist() for accessing the zonelist for a GFP mask
has been added to the -mm tree.  Its filename is
     mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/SubmitChecklist when testing your code ***

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

The current -mm tree may be found at http://userweb.kernel.org/~akpm/mmotm/

------------------------------------------------------
Subject: mm: introduce node_zonelist() for accessing the zonelist for a GFP mask
From: Mel Gorman <mel@xxxxxxxxx>

Introduce a node_zonelist() helper function.  It is used to look up the
appropriate zonelist given a node and a GFP mask.  On its own, the patch is a
cleanup, but it helps clarify parts of the two-zonelist-per-node patchset.  If
necessary, it can be merged with the next patch in this set without problems.
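
For reference, the conversion at each call site is mechanical; a minimal
before/after sketch, abridged from the alloc_pages_node() hunk below:

	/* before: open-coded zonelist lookup for a node and GFP mask */
	zonelist = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask);

	/* after: the same lookup through the new helper */
	zonelist = node_zonelist(nid, gfp_mask);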

Reviewed-by: Christoph Lameter <clameter@xxxxxxx>
Signed-off-by: Mel Gorman <mel@xxxxxxxxx>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@xxxxxx>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Cc: Mel Gorman <mel@xxxxxxxxx>
Cc: Christoph Lameter <clameter@xxxxxxx>
Cc: Hugh Dickins <hugh@xxxxxxxxxxx>
Cc: Nick Piggin <nickpiggin@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 drivers/char/sysrq.c      |    3 +--
 fs/buffer.c               |    6 +++---
 include/linux/gfp.h       |    8 ++++++--
 include/linux/mempolicy.h |    2 +-
 mm/mempolicy.c            |    6 +++---
 mm/page_alloc.c           |    3 +--
 mm/slab.c                 |    3 +--
 mm/slub.c                 |    3 +--
 8 files changed, 17 insertions(+), 17 deletions(-)

diff -puN drivers/char/sysrq.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask drivers/char/sysrq.c
--- a/drivers/char/sysrq.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/drivers/char/sysrq.c
@@ -271,8 +271,7 @@ static struct sysrq_key_op sysrq_term_op
 
 static void moom_callback(struct work_struct *ignored)
 {
-	out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
-			GFP_KERNEL, 0);
+	out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
diff -puN fs/buffer.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask fs/buffer.c
--- a/fs/buffer.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/fs/buffer.c
@@ -361,13 +361,13 @@ void invalidate_bdev(struct block_device
 static void free_more_memory(void)
 {
 	struct zonelist *zonelist;
-	pg_data_t *pgdat;
+	int nid;
 
 	wakeup_pdflush(1024);
 	yield();
 
-	for_each_online_pgdat(pgdat) {
-		zonelist = &pgdat->node_zonelists[gfp_zone(GFP_NOFS)];
+	for_each_online_node(nid) {
+		zonelist = node_zonelist(nid, GFP_NOFS);
 		if (zonelist->zones[0])
 			try_to_free_pages(zonelist, 0, GFP_NOFS);
 	}
diff -puN include/linux/gfp.h~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask include/linux/gfp.h
--- a/include/linux/gfp.h~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/include/linux/gfp.h
@@ -154,10 +154,15 @@ static inline enum zone_type gfp_zone(gf
 /*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are many zonelists per node, two for each active zone.
  *
  * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
  * optimized to &contig_page_data at compile-time.
  */
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+	return NODE_DATA(nid)->node_zonelists + gfp_zone(flags);
+}
 
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
@@ -178,8 +183,7 @@ static inline struct page *alloc_pages_n
 	if (nid < 0)
 		nid = numa_node_id();
 
-	return __alloc_pages(gfp_mask, order,
-		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+	return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 #ifdef CONFIG_NUMA
diff -puN include/linux/mempolicy.h~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask include/linux/mempolicy.h
--- a/include/linux/mempolicy.h~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/include/linux/mempolicy.h
@@ -241,7 +241,7 @@ static inline void mpol_fix_fork_child_f
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
  		unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
 {
-	return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+	return node_zonelist(0, gfp_flags);
 }
 
 static inline int do_migrate_pages(struct mm_struct *mm,
diff -puN mm/mempolicy.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask mm/mempolicy.c
--- a/mm/mempolicy.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/mm/mempolicy.c
@@ -1183,7 +1183,7 @@ static struct zonelist *zonelist_policy(
 		nd = 0;
 		BUG();
 	}
-	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
+	return node_zonelist(nd, gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -1297,7 +1297,7 @@ struct zonelist *huge_zonelist(struct vm
 
 		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
 		__mpol_free(pol);		/* finished with pol */
-		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
+		return node_zonelist(nid, gfp_flags);
 	}
 
 	zl = zonelist_policy(GFP_HIGHUSER, pol);
@@ -1319,7 +1319,7 @@ static struct page *alloc_page_interleav
 	struct zonelist *zl;
 	struct page *page;
 
-	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
+	zl = node_zonelist(nid, gfp);
 	page = __alloc_pages(gfp, order, zl);
 	if (page && page_zone(page) == zl->zones[0])
 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
diff -puN mm/page_alloc.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask mm/page_alloc.c
--- a/mm/page_alloc.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/mm/page_alloc.c
@@ -1712,10 +1712,9 @@ EXPORT_SYMBOL(free_pages);
 static unsigned int nr_free_zone_pages(int offset)
 {
 	/* Just pick one node, since fallback list is circular */
-	pg_data_t *pgdat = NODE_DATA(numa_node_id());
 	unsigned int sum = 0;
 
-	struct zonelist *zonelist = pgdat->node_zonelists + offset;
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 	struct zone **zonep = zonelist->zones;
 	struct zone *zone;
 
diff -puN mm/slab.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask mm/slab.c
--- a/mm/slab.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/mm/slab.c
@@ -3251,8 +3251,7 @@ static void *fallback_alloc(struct kmem_
 	if (flags & __GFP_THISNODE)
 		return NULL;
 
-	zonelist = &NODE_DATA(slab_node(current->mempolicy))
-			->node_zonelists[gfp_zone(flags)];
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
diff -puN mm/slub.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask mm/slub.c
--- a/mm/slub.c~mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask
+++ a/mm/slub.c
@@ -1317,8 +1317,7 @@ static struct page *get_any_partial(stru
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
-	zonelist = &NODE_DATA(
-		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
 	for (z = zonelist->zones; *z; z++) {
 		struct kmem_cache_node *n;
 
_

Patches currently in -mm which might be from mel@xxxxxxxxx are

remove-set_migrateflags.patch
mm-use-zonelists-instead-of-zones-when-direct-reclaiming-pages.patch
mm-introduce-node_zonelist-for-accessing-the-zonelist-for-a-gfp-mask.patch
mm-remember-what-the-preferred-zone-is-for-zone_statistics.patch
mm-use-two-zonelist-that-are-filtered-by-gfp-mask.patch
mm-have-zonelist-contains-structs-with-both-a-zone-pointer-and-zone_idx.patch
mm-filter-based-on-a-nodemask-as-well-as-a-gfp_mask.patch
memory-hotplug-add-removable-to-sysfs-to-show-memblock-removability.patch
page-owner-tracking-leak-detector.patch
add-debugging-aid-for-memory-initialisation-problems.patch

