From: Xiubo Li <xiubli@xxxxxxxxxx>

There could be a huge number of capsnaps queued in a short time. On
x86_64 each struct ceph_cap_snap is 248 bytes, which kzalloc rounds up
to 256 bytes. Move it to a dedicated slab cache to save 8 bytes for
each.

For the kmalloc-256 slab cache the actual object size is 512 bytes:

  kmalloc-256    21797  74656    512   32    4 : tunables, etc

For a dedicated slab cache the real size is 312 bytes:

  ceph_cap_snap      0      0    312   52    4 : tunables, etc

So we actually save 200 bytes for each.

Signed-off-by: Xiubo Li <xiubli@xxxxxxxxxx>
---
 fs/ceph/snap.c               | 5 +++--
 fs/ceph/super.c              | 7 +++++++
 fs/ceph/super.h              | 2 +-
 include/linux/ceph/libceph.h | 1 +
 4 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index b41e6724c591..c787775eaf2a 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -482,7 +482,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	struct ceph_buffer *old_blob = NULL;
 	int used, dirty;
 
-	capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+	capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
 	if (!capsnap) {
 		pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
 		return;
@@ -603,7 +603,8 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	spin_unlock(&ci->i_ceph_lock);
 
 	ceph_buffer_put(old_blob);
-	kfree(capsnap);
+	if (capsnap)
+		kmem_cache_free(ceph_cap_snap_cachep, capsnap);
 	ceph_put_snap_context(old_snapc);
 }
 
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index bf79f369aec6..978463fa822c 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -864,6 +864,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
  */
 struct kmem_cache *ceph_inode_cachep;
 struct kmem_cache *ceph_cap_cachep;
+struct kmem_cache *ceph_cap_snap_cachep;
 struct kmem_cache *ceph_cap_flush_cachep;
 struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
@@ -892,6 +893,9 @@ static int __init init_caches(void)
 	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
 	if (!ceph_cap_cachep)
 		goto bad_cap;
+	ceph_cap_snap_cachep = KMEM_CACHE(ceph_cap_snap, SLAB_MEM_SPREAD);
+	if (!ceph_cap_snap_cachep)
+		goto bad_cap_snap;
 	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
 					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
 	if (!ceph_cap_flush_cachep)
@@ -931,6 +935,8 @@ static int __init init_caches(void)
 bad_dentry:
 	kmem_cache_destroy(ceph_cap_flush_cachep);
 bad_cap_flush:
+	kmem_cache_destroy(ceph_cap_snap_cachep);
+bad_cap_snap:
 	kmem_cache_destroy(ceph_cap_cachep);
 bad_cap:
 	kmem_cache_destroy(ceph_inode_cachep);
@@ -947,6 +953,7 @@ static void destroy_caches(void)
 
 	kmem_cache_destroy(ceph_inode_cachep);
 	kmem_cache_destroy(ceph_cap_cachep);
+	kmem_cache_destroy(ceph_cap_snap_cachep);
 	kmem_cache_destroy(ceph_cap_flush_cachep);
 	kmem_cache_destroy(ceph_dentry_cachep);
 	kmem_cache_destroy(ceph_file_cachep);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c0718d5a8fb8..2d08104c8955 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -231,7 +231,7 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 	if (refcount_dec_and_test(&capsnap->nref)) {
 		if (capsnap->xattr_blob)
 			ceph_buffer_put(capsnap->xattr_blob);
-		kfree(capsnap);
+		kmem_cache_free(ceph_cap_snap_cachep, capsnap);
 	}
 }
 
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index edf62eaa6285..00af2c98da75 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -284,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
 
 extern struct kmem_cache *ceph_inode_cachep;
 extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
 extern struct kmem_cache *ceph_cap_flush_cachep;
 extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
-- 
2.27.0
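
For reference, below is a minimal sketch of the dedicated slab cache
pattern the patch applies to ceph_cap_snap: a cache sized to the object
packs entries at their real size instead of rounding up to the next
kmalloc power-of-two bucket, which is where the savings come from. The
foo_item struct and the toy module around it are made up purely for
illustration and are not part of this patch.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Made-up object for illustration; roughly the size of ceph_cap_snap. */
struct foo_item {
	u64 id;
	char payload[240];
};

static struct kmem_cache *foo_item_cachep;

static int __init foo_slab_init(void)
{
	struct foo_item *it;

	/*
	 * Object-sized cache named after the struct, the same way the
	 * patch does KMEM_CACHE(ceph_cap_snap, SLAB_MEM_SPREAD).
	 */
	foo_item_cachep = KMEM_CACHE(foo_item, SLAB_MEM_SPREAD);
	if (!foo_item_cachep)
		return -ENOMEM;

	/* Zeroed allocation, keeping the semantics of the kzalloc() it replaces. */
	it = kmem_cache_zalloc(foo_item_cachep, GFP_KERNEL);
	if (it) {
		/*
		 * Unlike kfree(), kmem_cache_free() should not be handed a
		 * NULL pointer -- hence the "if (capsnap)" guard in the patch.
		 */
		kmem_cache_free(foo_item_cachep, it);
	}

	return 0;
}

static void __exit foo_slab_exit(void)
{
	kmem_cache_destroy(foo_item_cachep);
}

module_init(foo_slab_init);
module_exit(foo_slab_exit);
MODULE_LICENSE("GPL");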