On Wed, Jan 28, 2015 at 07:22:51PM +0300, Vladimir Davydov wrote:
> +++ b/mm/slub.c
> @@ -2007,6 +2007,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
> 	int pages;
> 	int pobjects;
>
> +	preempt_disable();
> 	do {
> 		pages = 0;
> 		pobjects = 0;
> @@ -2040,6 +2041,14 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
>
> 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
> 								!= oldpage);
> +	if (unlikely(!s->cpu_partial)) {
> +		unsigned long flags;
> +
> +		local_irq_save(flags);
> +		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
> +		local_irq_restore(flags);
> +	}
> +	preempt_enable();
> #endif
> }
>
> @@ -3369,7 +3378,7 @@ EXPORT_SYMBOL(kfree);
>  * being allocated from last increasing the chance that the last objects
>  * are freed in them.
>  */
> -int __kmem_cache_shrink(struct kmem_cache *s)
> +int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
> {
> 	int node;
> 	int i;
> @@ -3381,14 +3390,26 @@ int __kmem_cache_shrink(struct kmem_cache *s)
> 	unsigned long flags;
> 	int ret = 0;
>
> +	if (deactivate) {
> +		/*
> +		 * Disable empty slabs caching. Used to avoid pinning offline
> +		 * memory cgroups by kmem pages that can be freed.
> +		 */
> +		s->cpu_partial = 0;
> +		s->min_partial = 0;
> +
> +		/*
> +		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
> +		 * so we have to make sure the change is visible.
> +		 */
> +		kick_all_cpus_sync();
> +	}

Argh! What the heck?! And without a single mention in the changelog.

Why are you spraying IPIs across the entire machine? Why isn't synchronize_sched() good enough? That would allow you to get rid of the local_irq_save/restore as well.

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>