The subtraction "page->objects - page->inuse" in get_partial_node() always yields zero here: acquire_slab() has just frozen the slab and taken its whole freelist, leaving page->inuse equal to page->objects. Drop the subtraction and initialize 'available' to 0 instead.

Signed-off-by: Alex Shi <alex.shi@xxxxxxxxx>
---
 mm/slub.c |    3 +--
 1 files changed, 1 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 492beab..eb36a6b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1613,7 +1613,7 @@ static void *get_partial_node(struct kmem_cache *s,
 	spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t = acquire_slab(s, n, page, object == NULL);
-		int available;
+		int available = 0;
 
 		if (!t)
 			continue;
@@ -1623,7 +1623,6 @@ static void *get_partial_node(struct kmem_cache *s,
 			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
-			available = page->objects - page->inuse;
 		} else {
 			page->freelist = t;
 			available = put_cpu_partial(s, page, 0);
-- 
1.7.0
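
For readers unfamiliar with the surrounding slub code, here is a minimal
userspace sketch of why the subtraction is always zero in that branch.
The names toy_page and toy_acquire_slab are made up for illustration and
only mimic the relevant effect of acquire_slab() when it is called with
object == NULL (the whole freelist is taken, so inuse ends up equal to
objects); this is not the kernel code itself.

#include <assert.h>
#include <stdio.h>

struct toy_page {
	int objects;		/* total objects on the slab */
	int inuse;		/* objects currently handed out */
	void *freelist;		/* remaining free objects (opaque here) */
};

/*
 * Mimics the effect of acquire_slab() in "take the whole freelist" mode
 * (object == NULL in get_partial_node()): the freelist is grabbed and
 * inuse is set to objects.
 */
static void *toy_acquire_slab(struct toy_page *page)
{
	void *t = page->freelist;

	page->freelist = NULL;
	page->inuse = page->objects;
	return t;
}

int main(void)
{
	int slots[4];
	struct toy_page page = { .objects = 4, .inuse = 1, .freelist = slots };
	int available = 0;	/* matches "int available = 0;" in the patch */
	void *t = toy_acquire_slab(&page);

	if (t) {
		/* the removed line would compute objects - inuse, which is now 0 */
		assert(page.objects - page.inuse == 0);
		printf("available = %d\n", available);
	}
	return 0;
}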