There is no need to have ->name at offset 0: it's never used in hot
paths and is followed by a very large RO (after init) array of size
classes, so each time we need to take pool->migrate_lock (which
happens relatively often) we have to jump over that RO region. Swap
->migrate_lock and ->name.

->pages_allocated is modified relatively often, so move it up, right
after the RO size_class array.

->compaction_in_progress and ->stats are modified only from compaction
(defragmentation), which can run on only one CPU at a time and,
depending on the case, may or may not be common. Move both down.

Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
 mm/zsmalloc.c | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 8972b4b56cd3..2280ea17796b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -204,18 +204,15 @@ struct link_free {
 };
 
 struct zs_pool {
-	const char *name;
+	/* protect page/zspage migration */
+	rwlock_t migrate_lock;
 
 	struct size_class *size_class[ZS_SIZE_CLASSES];
-	struct kmem_cache *handle_cachep;
-	struct kmem_cache *zspage_cachep;
 
 	atomic_long_t pages_allocated;
 
-	struct zs_pool_stats stats;
-
-	/* Compact classes */
-	struct shrinker *shrinker;
+	struct kmem_cache *handle_cachep;
+	struct kmem_cache *zspage_cachep;
 
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
@@ -223,9 +220,14 @@ struct zs_pool {
 #ifdef CONFIG_COMPACTION
 	struct work_struct free_work;
 #endif
-	/* protect page/zspage migration */
-	rwlock_t migrate_lock;
+	const char *name;
+
+	/* Compact classes */
+	struct shrinker *shrinker;
+	/* Permit only one compaction at a time */
 	atomic_t compaction_in_progress;
+	/* compaction stats */
+	struct zs_pool_stats stats;
 };
 
 static void pool_write_unlock(struct zs_pool *pool)
-- 
2.48.1.262.g85cc9f2d1e-goog
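
To make the intent of the reordering concrete, below is a minimal
userspace sketch (not part of the patch) that prints member offsets
with offsetof(); the stub types and the ZS_SIZE_CLASSES value are
stand-in assumptions, not the kernel definitions. On a built kernel,
`pahole -C zs_pool` shows the real layout.

/* Illustrative stand-in for struct zs_pool after the patch; all types
 * and sizes here are assumptions, not the kernel's definitions. */
#include <stdio.h>
#include <stddef.h>

struct size_class;			/* opaque, only pointers are stored */

#define ZS_SIZE_CLASSES 255		/* illustrative value */

struct zs_pool_sketch {
	int migrate_lock;		/* rwlock_t stand-in, hot, now at offset 0 */
	struct size_class *size_class[ZS_SIZE_CLASSES];	/* large RO region */
	long pages_allocated;		/* frequently written, right after RO */
	const char *name;		/* cold, moved down */
};

int main(void)
{
	/* The frequently taken lock no longer sits behind the ~2KB
	 * (on 64-bit) read-only size_class array. */
	printf("migrate_lock    @ %zu\n", offsetof(struct zs_pool_sketch, migrate_lock));
	printf("size_class      @ %zu\n", offsetof(struct zs_pool_sketch, size_class));
	printf("pages_allocated @ %zu\n", offsetof(struct zs_pool_sketch, pages_allocated));
	printf("name            @ %zu\n", offsetof(struct zs_pool_sketch, name));
	return 0;
}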