Switch to the new kmem_cache_create_rcu() helper, which allows us to
specify a custom free pointer offset, avoiding the need for an external
free pointer that would grow struct file behind our backs.

Signed-off-by: Christian Brauner <brauner@xxxxxxxxxx>
---
 fs/file_table.c    | 6 +++---
 include/linux/fs.h | 2 ++
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/fs/file_table.c b/fs/file_table.c
index 694199a1a966..83d5ac1fadc0 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -514,9 +514,9 @@ EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
-	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
-				SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
-				SLAB_PANIC | SLAB_ACCOUNT, NULL);
+	filp_cachep = kmem_cache_create_rcu("filp", sizeof(struct file),
+				offsetof(struct file, f_freeptr),
+				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);
 	percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 }
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 61097a9cf317..5ca83e87adef 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1026,6 +1026,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
  * @f_task_work: task work entry point
  * @f_llist: work queue entrypoint
  * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
  */
 struct file {
 	atomic_long_t			f_count;
@@ -1057,6 +1058,7 @@ struct file {
 		struct callback_head	f_task_work;
 		struct llist_node	f_llist;
 		struct file_ra_state	f_ra;
+		freeptr_t		f_freeptr;
 	};
 	/* --- cacheline 3 boundary (192 bytes) --- */
 } __randomize_layout
-- 
2.45.2
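
For illustration only (not part of the patch; the names foo, foo_cachep
and foo_cache_init() are hypothetical), a minimal sketch of the same
pattern for another cache, assuming a kernel that already provides
kmem_cache_create_rcu() and freeptr_t:

#include <linux/init.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/stddef.h>

/*
 * Embed a freeptr_t in the object and hand its offset to
 * kmem_cache_create_rcu(). The slab allocator then stores its freelist
 * pointer at that offset while the object sits on the freelist, instead
 * of appending an external free pointer past the end of the object, and
 * the cache still gets SLAB_TYPESAFE_BY_RCU semantics.
 */
struct foo {
	refcount_t	ref;
	/* ... payload fields ... */
	freeptr_t	foo_freeptr;	/* clobbered once the object is freed */
};

static struct kmem_cache *foo_cachep __ro_after_init;

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create_rcu("foo", sizeof(struct foo),
					   offsetof(struct foo, foo_freeptr),
					   SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}

The constraint is that the chosen offset must point at storage inside
the object that nothing reads once it has been returned to the
allocator; struct file reserves f_freeptr for exactly that purpose,
which is why its kerneldoc says "don't touch".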