The page field will go away, so it's more convenient to pass the page
struct to node_match() instead of the kmem_cache_cpu struct.

Signed-off-by: Christoph Lameter <cl@xxxxxxxxx>

---
 mm/slub.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2011-09-02 08:20:39.221219372 -0500
+++ linux-2.6/mm/slub.c	2011-09-02 08:20:45.611219333 -0500
@@ -1951,10 +1951,10 @@ static void flush_all(struct kmem_cache
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
+	if (node != NUMA_NO_NODE && page_to_nid(page) != node)
 		return 0;
 #endif
 	return 1;
@@ -2095,7 +2095,7 @@ static void *__slab_alloc(struct kmem_ca
 		goto new_slab;
 
-	if (unlikely(!node_match(c, node))) {
+	if (unlikely(!node_match(page, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
 		deactivate_slab(s, page, freelist);
 		c->page = NULL;
@@ -2189,6 +2189,7 @@ static __always_inline void *slab_alloc(
 {
 	void **object;
 	struct kmem_cache_cpu *c;
+	struct page *page;
 	unsigned long tid;
 
 	if (slab_pre_alloc_hook(s, gfpflags))
@@ -2214,7 +2215,8 @@ redo:
 
 	barrier();
 	object = c->freelist;
-	if (unlikely(!object || !node_match(c, node)))
+	page = c->page;
+	if (unlikely(!object || !node_match(page, node)))
 		object = __slab_alloc(s, gfpflags, node, addr, c);
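
For anyone following along outside the kernel tree, here is a minimal
standalone C sketch of the pattern the last hunk establishes in the fast
path: read the per-cpu page pointer once into a local variable and pass
that snapshot to node_match(), rather than letting node_match() re-read
c->page. Only the shape of node_match() mirrors the patched function;
fake_page, fake_cpu_slab, fast_alloc() and the nid field are invented
stand-ins for struct page, struct kmem_cache_cpu, slab_alloc() and
page_to_nid(), and none of this is real slub.c code.

/*
 * Sketch of "snapshot, then check": the shared per-cpu fields are read
 * once into locals, and all later decisions use the locals.
 */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

struct fake_page {
	int nid;			/* node this page's memory lives on */
};

struct fake_cpu_slab {
	void **freelist;		/* first free object, if any */
	struct fake_page *page;		/* slab page currently in use */
};

/* After the patch: takes the page snapshot, not the per-cpu struct. */
static inline int node_match(struct fake_page *page, int node)
{
	if (node != NUMA_NO_NODE && page->nid != node)
		return 0;
	return 1;
}

static void *fast_alloc(struct fake_cpu_slab *c, int node)
{
	void **object;
	struct fake_page *page;

	object = c->freelist;
	page = c->page;		/* single read; c->page may change later */
	if (!object || !node_match(page, node))
		return NULL;	/* would fall back to the slow path */
	c->freelist = (void **)*object;
	return object;
}

int main(void)
{
	void *slot[2] = { &slot[1], NULL };
	struct fake_page pg = { .nid = 0 };
	struct fake_cpu_slab c = { .freelist = slot, .page = &pg };

	printf("same node:  %p\n", fast_alloc(&c, 0));	/* object */
	printf("wrong node: %p\n", fast_alloc(&c, 1));	/* NULL */
	return 0;
}

As far as the diff context shows, the snapshot matters because the real
fast path runs between reading tid and the eventual cmpxchg with only a
barrier() ordering the loads, so c->page can change underneath it (e.g.
after preemption); passing one consistent snapshot means the NUMA check
and any subsequent use of the page agree on which page was seen.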