On Tue, 2017-06-06 at 14:00 -0400, Jeff Layton wrote:
> On Tue, 2017-06-06 at 13:19 -0400, Benjamin Coddington wrote:
> > Since commit c69899a17ca4 "NFSv4: Update of VFS byte range lock must be
> > atomic with the stateid update", NFSv4 has been inserting locks in rpciod
> > worker context. The result is that the file_lock's fl_nspid is the
> > kworker's pid instead of the original userspace pid.
> > 
> > The fl_nspid is only used to represent the namespaced virtual pid number
> > when displaying locks or returning from F_GETLK. There's no reason to set
> > it for every inserted lock, since we can usually just look it up from
> > fl_pid. So, instead of looking up and holding struct pid for every lock,
> > let's just look up the virtual pid number from fl_pid when it is needed.
> > That means we can remove fl_nspid entirely.
> > 
> 
> With this set, I think we ought to codify that the stored pid must be
> relative to the init_pid_ns. Let's make that clear in the comments for
> filesystem authors.
> 
> > Signed-off-by: Benjamin Coddington <bcodding@xxxxxxxxxx>
> > ---
> >  fs/locks.c         | 58 ++++++++++++++++++++++++++++++++----------------------
> >  include/linux/fs.h |  1 -
> >  2 files changed, 35 insertions(+), 24 deletions(-)
> > 
> > diff --git a/fs/locks.c b/fs/locks.c
> > index d7daa6c8932f..104398ccc9b9 100644
> > --- a/fs/locks.c
> > +++ b/fs/locks.c
> > @@ -733,7 +733,6 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
> >  static void
> >  locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
> >  {
> > -	fl->fl_nspid = get_pid(task_tgid(current));
> >  	list_add_tail(&fl->fl_list, before);
> >  	locks_insert_global_locks(fl);
> >  }
> > @@ -743,10 +742,6 @@ locks_unlink_lock_ctx(struct file_lock *fl)
> >  {
> >  	locks_delete_global_locks(fl);
> >  	list_del_init(&fl->fl_list);
> > -	if (fl->fl_nspid) {
> > -		put_pid(fl->fl_nspid);
> > -		fl->fl_nspid = NULL;
> > -	}
> >  	locks_wake_up_blocks(fl);
> >  }
> > 
> > @@ -823,8 +818,6 @@ posix_test_lock(struct file *filp, struct file_lock *fl)
> >  	list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
> >  		if (posix_locks_conflict(fl, cfl)) {
> >  			locks_copy_conflock(fl, cfl);
> > -			if (cfl->fl_nspid)
> > -				fl->fl_pid = pid_vnr(cfl->fl_nspid);
> >  			goto out;
> >  		}
> >  	}
> > @@ -2048,6 +2041,31 @@ int vfs_test_lock(struct file *filp, struct file_lock *fl)
> >  }
> >  EXPORT_SYMBOL_GPL(vfs_test_lock);
> > 
> > +/**
> > + * locks_translate_pid - translate a pid number into a namespace
> > + * @init_nr: The pid number in the init_pid_ns
> > + * @ns: The namespace into which the pid should be translated
> > + *
> > + * Used to translate a fl_pid into a namespace virtual pid number
> > + */
> > +static pid_t locks_translate_pid(int init_nr, struct pid_namespace *ns)
> > +{
> > +	pid_t vnr = 0;
> > +	struct task_struct *task;
> > +
> > +	rcu_read_lock();
> > +	task = find_task_by_pid_ns(init_nr, &init_pid_ns);
> > +	if (task)
> > +		get_task_struct(task);
> > +	rcu_read_unlock();
> 
> Is that safe? What prevents get_task_struct from doing a 0->1 transition
> there after the task usage count has already gone 1->0 and is on its way
> to being freed?
> 
> > +	if (!task)
> > +		goto out;
> > +	vnr = task_pid_nr_ns(task, ns);
> > +	put_task_struct(task);
> > +out:
> > +	return vnr;
> > +}

Now that I look, I think it might be best to just do all of that under
the rcu_read_lock and not muck with the refcount at all.
So something like:

+static pid_t locks_translate_pid(int init_nr, struct pid_namespace *ns)
+{
+	pid_t vnr = 0;
+	struct task_struct *task;
+
+	rcu_read_lock();
+	task = find_task_by_pid_ns(init_nr, &init_pid_ns);
+	if (task)
+		vnr = task_pid_nr_ns(task, ns);
+	rcu_read_unlock();
+	return vnr;
+}

That should be fine since task_pid_nr_ns pretty much does all of its
work under the rcu_read_lock anyway.

> >  static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
> >  {
> >  	flock->l_pid = IS_OFDLCK(fl) ? -1 : fl->fl_pid;
> > @@ -2584,22 +2602,16 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
> >  {
> >  	struct inode *inode = NULL;
> >  	unsigned int fl_pid;
> > +	struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
> > 
> > -	if (fl->fl_nspid) {
> > -		struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
> > -
> > -		/* Don't let fl_pid change based on who is reading the file */
> > -		fl_pid = pid_nr_ns(fl->fl_nspid, proc_pidns);
> > -
> > -		/*
> > -		 * If there isn't a fl_pid don't display who is waiting on
> > -		 * the lock if we are called from locks_show, or if we are
> > -		 * called from __show_fd_info - skip lock entirely
> > -		 */
> > -		if (fl_pid == 0)
> > -			return;
> > -	} else
> > -		fl_pid = fl->fl_pid;
> > +	fl_pid = locks_translate_pid(fl->fl_pid, proc_pidns);
> > +	/*
> > +	 * If there isn't a fl_pid don't display who is waiting on
> > +	 * the lock if we are called from locks_show, or if we are
> > +	 * called from __show_fd_info - skip lock entirely
> > +	 */
> > +	if (fl_pid == 0)
> > +		return;
> > 
> >  	if (fl->fl_file != NULL)
> >  		inode = locks_inode(fl->fl_file);
> > @@ -2674,7 +2686,7 @@ static int locks_show(struct seq_file *f, void *v)
> > 
> >  	fl = hlist_entry(v, struct file_lock, fl_link);
> > 
> > -	if (fl->fl_nspid && !pid_nr_ns(fl->fl_nspid, proc_pidns))
> > +	if (locks_translate_pid(fl->fl_pid, proc_pidns) == 0)
> >  		return 0;
> > 
> >  	lock_get_status(f, fl, iter->li_pos, "");
> > diff --git a/include/linux/fs.h b/include/linux/fs.h
> > index aa4affb38c39..b013fac515f7 100644
> > --- a/include/linux/fs.h
> > +++ b/include/linux/fs.h
> > @@ -984,7 +984,6 @@ struct file_lock {
> >  	unsigned char fl_type;
> >  	unsigned int fl_pid;
> >  	int fl_link_cpu;		/* what cpu's list is this on? */
> > -	struct pid *fl_nspid;
> >  	wait_queue_head_t fl_wait;
> >  	struct file *fl_file;
> >  	loff_t fl_start;

-- 
Jeff Layton <jlayton@xxxxxxxxxx>
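P.S. If we do codify that fl_pid is always stored relative to init_pid_ns, the
same helper could also be used on the F_GETLK path so that l_pid comes back in
the caller's namespace. The posted patch leaves posix_lock_to_flock() alone, so
treat the following as a sketch only; the task_active_pid_ns(current)
translation is an assumption here, not something the patch does:

static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
{
	/*
	 * Sketch only -- not in the posted patch. Assumes fl->fl_pid is
	 * always init_pid_ns-relative and locks_translate_pid() is the
	 * RCU-only version above. Report the lock owner's pid in the
	 * caller's pid namespace.
	 */
	flock->l_pid = IS_OFDLCK(fl) ? -1 :
			locks_translate_pid(fl->fl_pid,
					    task_active_pid_ns(current));
	/* ... rest of the function unchanged ... */
}

That would keep /proc/locks and F_GETLK consistent: both would translate the
stored init_pid_ns number into the namespace of whoever is asking.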