From: chenqiwu <chenqiwu@xxxxxxxxxx>

There are slub_cpu_partial() and slub_set_cpu_partial() APIs that wrap
kmem_cache->cpu_partial. Use these two APIs to replace the open-coded
accesses to kmem_cache->cpu_partial in the slub code.

Signed-off-by: chenqiwu <chenqiwu@xxxxxxxxxx>
---
Changes in v2:
 - rewrite the commit title.
---
 mm/slub.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 17dc00e..1eb888c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2284,7 +2284,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		if (oldpage) {
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
-			if (drain && pobjects > s->cpu_partial) {
+			if (drain && pobjects > slub_cpu_partial(s)) {
 				unsigned long flags;
 				/*
 				 * partial array is full. Move the existing
@@ -2309,7 +2309,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
 								!= oldpage);
 
-	if (unlikely(!s->cpu_partial)) {
+	if (unlikely(!slub_cpu_partial(s))) {
 		unsigned long flags;
 
 		local_irq_save(flags);
@@ -3493,15 +3493,15 @@ static void set_cpu_partial(struct kmem_cache *s)
 	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
-		s->cpu_partial = 0;
+		slub_set_cpu_partial(s, 0);
 	else if (s->size >= PAGE_SIZE)
-		s->cpu_partial = 2;
+		slub_set_cpu_partial(s, 2);
 	else if (s->size >= 1024)
-		s->cpu_partial = 6;
+		slub_set_cpu_partial(s, 6);
 	else if (s->size >= 256)
-		s->cpu_partial = 13;
+		slub_set_cpu_partial(s, 13);
 	else
-		s->cpu_partial = 30;
+		slub_set_cpu_partial(s, 30);
 #endif
 }

-- 
1.9.1
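
P.S. For reviewers' convenience: the wrappers used above live in
include/linux/slub_def.h. Quoted from memory against a contemporary tree
(please check the exact definitions in the tree this is applied to), they
look roughly like:

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
/* Without CONFIG_SLUB_CPU_PARTIAL the field does not exist: reads are 0, sets are no-ops */
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

Since put_cpu_partial() and the body of set_cpu_partial() are already
compiled only under CONFIG_SLUB_CPU_PARTIAL, the conversions here are
one-to-one substitutions with no functional change.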