Group some variables based on their sizes to reduce holes.

On x86_64, this shrinks the size of 'struct ceph_snapid_map' from 72 to
64 bytes.

Because of the way memory allocation works, when such a structure is
allocated, 96 bytes are actually allocated for a 72-byte request. So, on
x86_64, this change saves 32 bytes per allocation and lets the structure
fit in a single cacheline.

Signed-off-by: Christophe JAILLET <christophe.jaillet@xxxxxxxxxx>
---
Using pahole

Before:
======
struct ceph_snapid_map {
	struct rb_node             node __attribute__((__aligned__(8))); /*     0    24 */
	struct list_head           lru;                  /*    24    16 */
	atomic_t                   ref;                  /*    40     4 */

	/* XXX 4 bytes hole, try to pack */

	u64                        snap;                 /*    48     8 */
	dev_t                      dev;                  /*    56     4 */

	/* XXX 4 bytes hole, try to pack */

	/* --- cacheline 1 boundary (64 bytes) --- */
	long unsigned int          last_used;            /*    64     8 */

	/* size: 72, cachelines: 2, members: 6 */
	/* sum members: 64, holes: 2, sum holes: 8 */
	/* forced alignments: 1 */
	/* last cacheline: 8 bytes */
} __attribute__((__aligned__(8)));
---
 fs/ceph/mds_client.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 0598faa50e2e..2328dbda5ab6 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -355,8 +355,8 @@ struct ceph_snapid_map {
 	struct rb_node node;
 	struct list_head lru;
 	atomic_t ref;
-	u64 snap;
 	dev_t dev;
+	u64 snap;
 	unsigned long last_used;
 };
 
-- 
2.34.1
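
[Illustration only, not part of the patch: a minimal stand-alone user-space
sketch of why swapping 'snap' and 'dev' removes the two 4-byte holes
reported by pahole. The kernel types are replaced here by same-size
stand-ins (mock_* names are hypothetical), and offsets/sizes assume
x86_64 (LP64).]

/*
 * Compile with: gcc -O2 -o layout layout.c
 * On x86_64 it prints 72 bytes for the old layout and 64 bytes for
 * the new one, matching the pahole output above.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct mock_rb_node { unsigned long a, b, c; };	/* 24 bytes, like struct rb_node */
struct mock_list_head { void *next, *prev; };	/* 16 bytes, like struct list_head */
typedef struct { int counter; } mock_atomic_t;	/*  4 bytes, like atomic_t */
typedef uint32_t mock_dev_t;			/*  4 bytes, like the kernel dev_t */
typedef uint64_t u64;

struct snapid_map_before {		/* current layout */
	struct mock_rb_node node;	/*  0..23 */
	struct mock_list_head lru;	/* 24..39 */
	mock_atomic_t ref;		/* 40..43, then a 4-byte hole */
	u64 snap;			/* 48..55, needs 8-byte alignment */
	mock_dev_t dev;			/* 56..59, then a 4-byte hole */
	unsigned long last_used;	/* 64..71 -> sizeof == 72 */
};

struct snapid_map_after {		/* layout after this patch */
	struct mock_rb_node node;	/*  0..23 */
	struct mock_list_head lru;	/* 24..39 */
	mock_atomic_t ref;		/* 40..43 */
	mock_dev_t dev;			/* 44..47, fills the first hole */
	u64 snap;			/* 48..55 */
	unsigned long last_used;	/* 56..63 -> sizeof == 64 */
};

int main(void)
{
	printf("before: sizeof=%zu snap@%zu dev@%zu\n",
	       sizeof(struct snapid_map_before),
	       offsetof(struct snapid_map_before, snap),
	       offsetof(struct snapid_map_before, dev));
	printf("after:  sizeof=%zu dev@%zu snap@%zu\n",
	       sizeof(struct snapid_map_after),
	       offsetof(struct snapid_map_after, dev),
	       offsetof(struct snapid_map_after, snap));
	return 0;
}

The 32-byte saving quoted in the log then follows from the allocator's
size classes: a 72-byte object comes from a 96-byte bucket, while the
64-byte object fits a 64-byte bucket exactly.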