get_next_ino has a number of problems:

- It uses and returns a uint, which is susceptible to overflow if a lot
  of volatile inodes that use get_next_ino are created.

- It's global, with no specificity per-sb or even per-filesystem. This
  means it's not that difficult to cause inode number wraparounds on a
  single device, which can result in multiple distinct inodes having
  the same inode number.

This patch adds a per-superblock counter that mitigates the second
case.

This design also allows us to later have a specific i_ino size
per-device, for example, allowing users to choose whether to use 32- or
64-bit inodes for each tmpfs mount. This is implemented in the next
commit.

Signed-off-by: Chris Down <chris@xxxxxxxxxxxxxx>
Reviewed-by: Amir Goldstein <amir73il@xxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: kernel-team@xxxxxx
---
 include/linux/shmem_fs.h |  1 +
 mm/shmem.c               | 30 +++++++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

v5: Nothing in code, just resending with the correct linux-mm domain.
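For anyone unfamiliar with why the wraparound matters: userspace widely
treats the (st_dev, st_ino) pair as a unique file identity (tar and
rsync both do this for hardlink detection), so two distinct live inodes
sharing an inode number on one device silently defeats such checks. A
minimal userspace sketch of that identity check follows; it is
illustrative only, not part of the patch, and the paths in main() are
made up:

#include <stdio.h>
#include <sys/stat.h>

/*
 * The identity check that tools like tar and rsync rely on: same
 * device and same inode number means "same file". An i_ino collision
 * after wraparound makes this return a false positive for two
 * genuinely distinct files on one tmpfs.
 */
static int same_file(const char *a, const char *b)
{
	struct stat st_a, st_b;

	if (stat(a, &st_a) || stat(b, &st_b))
		return 0;
	return st_a.st_dev == st_b.st_dev && st_a.st_ino == st_b.st_ino;
}

int main(void)
{
	/* Hypothetical paths on a single tmpfs mount. */
	printf("same: %d\n", same_file("/tmp/a", "/tmp/b"));
	return 0;
}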
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index de8e4b71e3ba..7fac91f490dc 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -35,6 +35,7 @@ struct shmem_sb_info {
 	unsigned char huge;	    /* Whether to try for hugepages */
 	kuid_t uid;		    /* Mount uid for root directory */
 	kgid_t gid;		    /* Mount gid for root directory */
+	ino_t next_ino;		    /* The next per-sb inode number to use */
 	struct mempolicy *mpol;     /* default memory policy for mappings */
 	spinlock_t shrinklist_lock;   /* Protects shrinklist */
 	struct list_head shrinklist;  /* List of shinkable inodes */
diff --git a/mm/shmem.c b/mm/shmem.c
index 8793e8cc1a48..9e97ba972225 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2236,6 +2236,12 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * shmem_get_inode - reserve, allocate, and initialise a new inode
+ *
+ * If this tmpfs is from kern_mount we use get_next_ino, which is global, since
+ * inum churn there is low and this avoids taking locks.
+ */
 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
 				     umode_t mode, dev_t dev, unsigned long flags)
 {
@@ -2248,7 +2254,28 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 	inode = new_inode(sb);
 	if (inode) {
-		inode->i_ino = get_next_ino();
+		if (sb->s_flags & SB_KERNMOUNT) {
+			/*
+			 * __shmem_file_setup, one of our callers, is lock-free:
+			 * it doesn't hold stat_lock in shmem_reserve_inode
+			 * since max_inodes is always 0, and is called from
+			 * potentially unknown contexts. As such, use the global
+			 * allocator which doesn't require the per-sb stat_lock.
+			 */
+			inode->i_ino = get_next_ino();
+		} else {
+			spin_lock(&sbinfo->stat_lock);
+			if (unlikely(sbinfo->next_ino > UINT_MAX)) {
+				/*
+				 * Emulate get_next_ino uint wraparound for
+				 * compatibility
+				 */
+				sbinfo->next_ino = 1;
+			}
+			inode->i_ino = sbinfo->next_ino++;
+			spin_unlock(&sbinfo->stat_lock);
+		}
+
 		inode_init_owner(inode, dir, mode);
 		inode->i_blocks = 0;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
@@ -3662,6 +3689,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
 #else
 	sb->s_flags |= SB_NOUSER;
 #endif
+	sbinfo->next_ino = 1;
 	sbinfo->max_blocks = ctx->blocks;
 	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
 	sbinfo->uid = ctx->uid;
-- 
2.24.1
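
For anyone who wants to play with the allocation scheme outside the
kernel, here is a userspace analogue of the per-sb counter added above.
A pthread mutex stands in for sbinfo->stat_lock, the struct and
function names are mine rather than the kernel's, and only the
non-kern_mount path is sketched:

#include <limits.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace analogue of the per-sb allocator: the mutex stands in for
 * sbinfo->stat_lock, and next_ino is 64-bit so the explicit UINT_MAX
 * wraparound check below is meaningful.
 */
struct sb_counter {
	pthread_mutex_t lock;
	uint64_t next_ino;	/* starts at 1, as in shmem_fill_super */
};

static uint64_t alloc_ino(struct sb_counter *c)
{
	uint64_t ino;

	pthread_mutex_lock(&c->lock);
	if (c->next_ino > UINT_MAX) {
		/* Emulate get_next_ino's uint wraparound: back to 1. */
		c->next_ino = 1;
	}
	ino = c->next_ino++;
	pthread_mutex_unlock(&c->lock);
	return ino;
}

int main(void)
{
	struct sb_counter c = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("%llu\n", (unsigned long long)alloc_ino(&c));	/* 1 */
	printf("%llu\n", (unsigned long long)alloc_ino(&c));	/* 2 */
	return 0;
}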