get_next_ino has a number of problems:

- It uses and returns a uint, which is susceptible to overflow if a lot
  of volatile inodes that use get_next_ino are created.

- It's global, with no specificity per-sb or even per-filesystem. This
  means it's not that difficult to cause inode number wraparounds on a
  single device, which can result in having multiple distinct inodes
  with the same inode number.

This patch adds a per-superblock counter that mitigates the second
case. This design also allows us to later have a specific i_ino size
per-device, for example, allowing users to choose whether to use 32- or
64-bit inodes for each tmpfs mount. This is implemented in the next
commit.

Signed-off-by: Chris Down <chris@xxxxxxxxxxxxxx>
Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Amir Goldstein <amir73il@xxxxxxxxx>
Cc: Jeff Layton <jlayton@xxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: kernel-team@xxxxxx
---
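A note for reviewers, not part of the commit message: the per-sb scheme
in shmem_get_inode below can be sanity-checked in userspace. This is a
minimal sketch of the same counter-plus-wraparound logic under a lock;
sb_info, get_ino, and the pthread mutex are illustrative stand-ins for
shmem_sb_info, the allocation path, and stat_lock, not kernel code.
Build with "gcc -pthread".

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

struct sb_info {
	pthread_mutex_t lock;	/* stands in for sbinfo->stat_lock */
	unsigned long last_ino;	/* stands in for sbinfo->last_ino */
};

static unsigned long get_ino(struct sb_info *sbinfo)
{
	unsigned long ino;

	pthread_mutex_lock(&sbinfo->lock);
	ino = ++sbinfo->last_ino;
	if (ino >= UINT_MAX) {
		/* Emulate get_next_ino's uint wraparound for compatibility */
		ino = sbinfo->last_ino = 1;
	}
	pthread_mutex_unlock(&sbinfo->lock);
	return ino;
}

int main(void)
{
	/* Start just below the 32-bit limit to show the wrap: 4294967294 1 2 3 */
	struct sb_info sb = { PTHREAD_MUTEX_INITIALIZER, UINT_MAX - 2 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%lu\n", get_ino(&sb));
	return 0;
}

The counter never hands out inode number 0 (which userspace commonly
treats as "no inode") and never repeats a number within a wraparound
period, at the cost of briefly taking the per-sb lock on each creation.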
 include/linux/shmem_fs.h |  1 +
 mm/shmem.c               | 55 ++++++++++++++++++++++++++++++++++------
 2 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index de8e4b71e3ba..dec4353cf3b7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -35,6 +35,7 @@ struct shmem_sb_info {
 	unsigned char huge;	    /* Whether to try for hugepages */
 	kuid_t uid;		    /* Mount uid for root directory */
 	kgid_t gid;		    /* Mount gid for root directory */
+	ino_t last_ino;		    /* The last used per-sb inode number */
 	struct mempolicy *mpol;     /* default memory policy for mappings */
 	spinlock_t shrinklist_lock;   /* Protects shrinklist */
 	struct list_head shrinklist;  /* List of shinkable inodes */
diff --git a/mm/shmem.c b/mm/shmem.c
index 165fa6332993..8af9fb922a96 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2235,8 +2235,18 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * shmem_get_inode - reserve, allocate, and initialise a new inode
+ *
+ * If use_sb_ino is true, we use the per-sb inode allocator to avoid wraparound.
+ * Otherwise, we use get_next_ino, which is global.
+ *
+ * If use_sb_ino is true or max_inodes is greater than 0, we may have to grab
+ * the per-sb stat_lock.
+ */
 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
-				     umode_t mode, dev_t dev, unsigned long flags)
+				     umode_t mode, dev_t dev,
+				     unsigned long flags, bool use_sb_ino)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -2247,7 +2257,30 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 	inode = new_inode(sb);
 	if (inode) {
-		inode->i_ino = get_next_ino();
+		if (use_sb_ino) {
+			spin_lock(&sbinfo->stat_lock);
+			inode->i_ino = ++sbinfo->last_ino;
+			if (unlikely(inode->i_ino >= UINT_MAX)) {
+				/*
+				 * Emulate get_next_ino uint wraparound for
+				 * compatibility
+				 */
+				pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
+					__func__, MINOR(sb->s_dev));
+				inode->i_ino = sbinfo->last_ino = 1;
+			}
+			spin_unlock(&sbinfo->stat_lock);
+		} else {
+			/*
+			 * __shmem_file_setup, one of our callers, is lock-free:
+			 * it doesn't hold stat_lock in shmem_reserve_inode
+			 * since max_inodes is always 0, and is called from
+			 * potentially unknown contexts. As such, use the global
+			 * allocator which doesn't require the per-sb stat_lock.
+			 */
+			inode->i_ino = get_next_ino();
+		}
+
 		inode_init_owner(inode, dir, mode);
 		inode->i_blocks = 0;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
@@ -2881,7 +2914,7 @@ shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
 	struct inode *inode;
 	int error = -ENOSPC;
 
-	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
+	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE, true);
 	if (inode) {
 		error = simple_acl_create(dir, inode);
 		if (error)
@@ -2910,7 +2943,7 @@ shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 	struct inode *inode;
 	int error = -ENOSPC;
 
-	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
+	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE, true);
 	if (inode) {
 		error = security_inode_init_security(inode, dir,
 						     NULL,
@@ -3106,7 +3139,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 		return -ENAMETOOLONG;
 
 	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
-				VM_NORESERVE);
+				VM_NORESERVE, true);
 	if (!inode)
 		return -ENOSPC;
 
@@ -3378,6 +3411,8 @@ enum shmem_param {
 	Opt_nr_inodes,
 	Opt_size,
 	Opt_uid,
+	Opt_inode32,
+	Opt_inode64,
 };
 
 static const struct fs_parameter_spec shmem_param_specs[] = {
@@ -3389,6 +3424,8 @@ static const struct fs_parameter_spec shmem_param_specs[] = {
 	fsparam_string("nr_inodes",	Opt_nr_inodes),
 	fsparam_string("size",		Opt_size),
 	fsparam_u32   ("uid",		Opt_uid),
+	fsparam_flag  ("inode32",	Opt_inode32),
+	fsparam_flag  ("inode64",	Opt_inode64),
 	{}
 };
 
@@ -3690,7 +3727,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
 #endif
 	uuid_gen(&sb->s_uuid);
 
-	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
+	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0,
+				VM_NORESERVE, true);
 	if (!inode)
 		goto failed;
 	inode->i_uid = sbinfo->uid;
@@ -4081,7 +4119,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 #define shmem_vm_ops				generic_file_vm_ops
 #define shmem_file_operations			ramfs_file_operations
-#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
+#define shmem_get_inode(sb, dir, mode, dev, flags, use_sb_ino) \
+	ramfs_get_inode(sb, dir, mode, dev)
 #define shmem_acct_size(flags, size)		0
 #define shmem_unacct_size(flags, size)		do {} while (0)
 
@@ -4105,7 +4144,7 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
 		return ERR_PTR(-ENOMEM);
 
 	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
-				flags);
+				flags, false);
 	if (unlikely(!inode)) {
 		shmem_unacct_size(flags, size);
 		return ERR_PTR(-ENOSPC);
-- 
2.24.1