This is a natural continuation of the previous patches, and it further
decreases the load on fc->lock.

Signed-off-by: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
---
 fs/fuse/dev.c    | 10 ++++++----
 fs/fuse/fuse_i.h |  5 ++++-
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3bfc5ed61c9a..cc8b95ad5b16 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -247,17 +247,18 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
 					 struct file *file)
 {
 	struct fuse_req *req = NULL;
+	struct fuse_inode *fi = get_fuse_inode(file_inode(file));
 	struct fuse_file *ff = file->private_data;
 
 	do {
 		wait_event(fc->reserved_req_waitq, ff->reserved_req);
-		spin_lock(&fc->lock);
+		spin_lock(&fi->lock);
 		if (ff->reserved_req) {
 			req = ff->reserved_req;
 			ff->reserved_req = NULL;
 			req->stolen_file = get_file(file);
 		}
-		spin_unlock(&fc->lock);
+		spin_unlock(&fi->lock);
 	} while (!req);
 
 	return req;
@@ -269,16 +270,17 @@ static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
 {
 	struct file *file = req->stolen_file;
+	struct fuse_inode *fi = get_fuse_inode(file_inode(file));
 	struct fuse_file *ff = file->private_data;
 
 	WARN_ON(req->max_pages);
-	spin_lock(&fc->lock);
+	spin_lock(&fi->lock);
 	memset(req, 0, sizeof(*req));
 	fuse_request_init(req, NULL, NULL, 0);
 	BUG_ON(ff->reserved_req);
 	ff->reserved_req = req;
 	wake_up_all(&fc->reserved_req_waitq);
-	spin_unlock(&fc->lock);
+	spin_unlock(&fi->lock);
 	fput(file);
 }
 
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f7442bcecbb0..10b20a24f693 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -166,7 +166,10 @@ struct fuse_file {
 	/** Fuse connection for this file */
 	struct fuse_conn *fc;
 
-	/** Request reserved for flush and release */
+	/*
+	 * Request reserved for flush and release.
+	 * Modified under the corresponding fuse_inode::lock.
+	 */
 	struct fuse_req *reserved_req;
 
	/** Kernel file handle guaranteed to be unique */
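
To illustrate the pattern the patch applies, here is a minimal userspace
sketch (not FUSE code): moving the reserved-request handoff from one
connection-wide lock to a per-inode lock means tasks working on different
inodes no longer contend with each other. All names below (toy_inode,
toy_req, toy_get_reserved, toy_put_reserved) are made up for the example,
and the wait_event()/wake_up_all() sleeping side of the kernel code is
omitted for brevity.

/*
 * Toy analogue of the locking change above. Each "inode" carries its
 * own lock, so taking or returning a reserved request on one inode
 * does not serialize against every other inode behind a single
 * connection-wide lock.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_req { int id; };

struct toy_inode {
	pthread_mutex_t lock;		/* plays the role of fi->lock */
	struct toy_req *reserved_req;	/* plays the role of ff->reserved_req */
};

/* Take the reserved request, as get_reserved_req() does under fi->lock */
static struct toy_req *toy_get_reserved(struct toy_inode *ti)
{
	struct toy_req *req = NULL;

	pthread_mutex_lock(&ti->lock);
	if (ti->reserved_req) {
		req = ti->reserved_req;
		ti->reserved_req = NULL;
	}
	pthread_mutex_unlock(&ti->lock);
	return req;
}

/* Return the request, as put_reserved_req() does under fi->lock */
static void toy_put_reserved(struct toy_inode *ti, struct toy_req *req)
{
	pthread_mutex_lock(&ti->lock);
	ti->reserved_req = req;
	pthread_mutex_unlock(&ti->lock);
}

int main(void)
{
	struct toy_req r = { .id = 1 };
	struct toy_inode ti = { .lock = PTHREAD_MUTEX_INITIALIZER,
				.reserved_req = &r };
	struct toy_req *req = toy_get_reserved(&ti);

	printf("took req %d\n", req ? req->id : -1);
	toy_put_reserved(&ti, req);
	return 0;
}

Build with: cc -pthread toy.c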