Add a new helper function get_first_slab() that gets the first slab from
a kmem_cache_node.

Signed-off-by: Geliang Tang <geliangtang@xxxxxxx>
---
 mm/slab.c | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 925921e..2463b57 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2754,6 +2754,21 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 #define cache_free_debugcheck(x,objp,z) (objp)
 #endif
 
+static struct page *get_first_slab(struct kmem_cache_node *n)
+{
+	struct page *page;
+
+	page = list_first_entry_or_null(&n->slabs_partial,
+			struct page, lru);
+	if (!page) {
+		n->free_touched = 1;
+		page = list_first_entry_or_null(&n->slabs_free,
+				struct page, lru);
+	}
+
+	return page;
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 							bool force_refill)
 {
@@ -2791,15 +2806,9 @@ retry:
 	while (batchcount > 0) {
 		struct page *page;
 		/* Get slab alloc is to come from. */
-		page = list_first_entry_or_null(&n->slabs_partial,
-				struct page, lru);
-		if (!page) {
-			n->free_touched = 1;
-			page = list_first_entry_or_null(&n->slabs_free,
-					struct page, lru);
-			if (!page)
-				goto must_grow;
-		}
+		page = get_first_slab(n);
+		if (!page)
+			goto must_grow;
 
 		check_spinlock_acquired(cachep);
 
@@ -3095,15 +3104,9 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 retry:
 	check_irq_off();
 	spin_lock(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial,
-			struct page, lru);
-	if (!page) {
-		n->free_touched = 1;
-		page = list_first_entry_or_null(&n->slabs_free,
-				struct page, lru);
-		if (!page)
-			goto must_grow;
-	}
+	page = get_first_slab(n);
+	if (!page)
+		goto must_grow;
 
 	check_spinlock_acquired_node(cachep, nodeid);
-- 
2.5.0
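
For readers who want to poke at the control flow outside the kernel, here is a
minimal userspace sketch. It is not part of the patch; struct fake_slab,
struct fake_node and the single-pointer "lists" are made-up stand-ins for
struct page, struct kmem_cache_node and the slabs_partial/slabs_free lists.
It only illustrates the "partial first, then free, else must grow" fallback
that get_first_slab() factors out:

/*
 * Illustrative sketch only -- NOT part of the patch.  The types and
 * fields below are invented stand-ins; the kernel code uses struct
 * page, struct kmem_cache_node and list_first_entry_or_null().
 */
#include <stdio.h>

struct fake_slab {
	int id;
};

struct fake_node {
	struct fake_slab *partial;	/* stand-in for slabs_partial */
	struct fake_slab *free;		/* stand-in for slabs_free */
	int free_touched;
};

/* Same shape as the new helper: prefer a partially used slab, else
 * mark the free list as touched and fall back to a fully free slab;
 * a NULL return tells the caller to take its must_grow path. */
static struct fake_slab *fake_get_first_slab(struct fake_node *n)
{
	if (n->partial)
		return n->partial;

	n->free_touched = 1;
	return n->free;
}

int main(void)
{
	struct fake_slab s = { .id = 42 };
	struct fake_node n = { .partial = NULL, .free = &s, .free_touched = 0 };
	struct fake_slab *slab = fake_get_first_slab(&n);

	if (slab)
		printf("got slab %d, free_touched=%d\n", slab->id, n.free_touched);
	else
		printf("no slab, must grow\n");

	return 0;
}

As the diff shows, cache_alloc_refill() and ____cache_alloc_node() previously
open-coded this two-step lookup; with the helper each caller only checks for
NULL and jumps to its must_grow label.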