This is a note to let you know that I've just added the patch titled

    gfs2: Clean up function may_grant

to the 5.15-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    gfs2-clean-up-function-may_grant.patch
and it can be found in the queue-5.15 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Fri Apr 29 11:07:48 AM CEST 2022
From: Anand Jain <anand.jain@xxxxxxxxxx>
Date: Fri, 15 Apr 2022 06:28:43 +0800
Subject: gfs2: Clean up function may_grant
To: stable@xxxxxxxxxxxxxxx
Cc: linux-btrfs@xxxxxxxxxxxxxxx, Andreas Gruenbacher <agruenba@xxxxxxxxxx>, Anand Jain <anand.jain@xxxxxxxxxx>
Message-ID: <16061e1d0b15ee024905913510b9569e0c5011b4.1649951733.git.anand.jain@xxxxxxxxxx>

From: Andreas Gruenbacher <agruenba@xxxxxxxxxx>

commit 6144464937fe1e6135b13a30502a339d549bf093 upstream

Pass the first current glock holder into function may_grant and
deobfuscate the logic there.

While at it, switch from BUG_ON to GLOCK_BUG_ON in may_grant.  To make
that build cleanly, de-constify the may_grant arguments.

We're now using function find_first_holder in do_promote, so move the
function's definition above do_promote.

Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
Signed-off-by: Anand Jain <anand.jain@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 fs/gfs2/glock.c |  119 ++++++++++++++++++++++++++++++++------------------------
 1 file changed, 69 insertions(+), 50 deletions(-)

--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -301,46 +301,59 @@ void gfs2_glock_put(struct gfs2_glock *g
 }
 
 /**
- * may_grant - check if its ok to grant a new lock
+ * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
+ * @current_gh: One of the current holders of @gl
  * @gh: The lock request which we wish to grant
  *
- * Returns: true if its ok to grant the lock
+ * With our current compatibility rules, if a glock has one or more active
+ * holders (HIF_HOLDER flag set), any of those holders can be passed in as
+ * @current_gh; they are all the same as far as compatibility with the new @gh
+ * goes.
+ *
+ * Returns true if it's ok to grant the lock.
  */
-static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
-{
-        const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
+static inline bool may_grant(struct gfs2_glock *gl,
+                             struct gfs2_holder *current_gh,
+                             struct gfs2_holder *gh)
+{
+        if (current_gh) {
+                GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
+
+                switch(current_gh->gh_state) {
+                case LM_ST_EXCLUSIVE:
+                        /*
+                         * Here we make a special exception to grant holders
+                         * who agree to share the EX lock with other holders
+                         * who also have the bit set. If the original holder
+                         * has the LM_FLAG_NODE_SCOPE bit set, we grant more
+                         * holders with the bit set.
+                         */
+                        return gh->gh_state == LM_ST_EXCLUSIVE &&
+                               (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
+                               (gh->gh_flags & LM_FLAG_NODE_SCOPE);
 
-        if (gh != gh_head) {
-                /**
-                 * Here we make a special exception to grant holders who agree
-                 * to share the EX lock with other holders who also have the
-                 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
-                 * is set, we grant more holders with the bit set.
-                 */
-                if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
-                    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
-                    gh->gh_state == LM_ST_EXCLUSIVE &&
-                    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
-                        return 1;
-                if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-                     gh_head->gh_state == LM_ST_EXCLUSIVE))
-                        return 0;
+                case LM_ST_SHARED:
+                case LM_ST_DEFERRED:
+                        return gh->gh_state == current_gh->gh_state;
+
+                default:
+                        return false;
+                }
         }
+
         if (gl->gl_state == gh->gh_state)
-                return 1;
+                return true;
         if (gh->gh_flags & GL_EXACT)
-                return 0;
+                return false;
         if (gl->gl_state == LM_ST_EXCLUSIVE) {
-                if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
-                        return 1;
-                if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
-                        return 1;
+                return gh->gh_state == LM_ST_SHARED ||
+                       gh->gh_state == LM_ST_DEFERRED;
         }
-        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
-                return 1;
-        return 0;
+        if (gh->gh_flags & LM_FLAG_ANY)
+                return gl->gl_state != LM_ST_UNLOCKED;
+        return false;
 }
 
 static void gfs2_holder_wake(struct gfs2_holder *gh)
@@ -381,6 +394,24 @@ static void do_error(struct gfs2_glock *
 }
 
 /**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+        struct gfs2_holder *gh;
+
+        if (!list_empty(&gl->gl_holders)) {
+                gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
+                                      gh_list);
+                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+                        return gh;
+        }
+        return NULL;
+}
+
+/**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
  *
@@ -393,14 +424,15 @@ __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
-        struct gfs2_holder *gh, *tmp;
+        struct gfs2_holder *gh, *tmp, *first_gh;
         int ret;
 
 restart:
+        first_gh = find_first_holder(gl);
         list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
                 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                         continue;
-                if (may_grant(gl, gh)) {
+                if (may_grant(gl, first_gh, gh)) {
                         if (gh->gh_list.prev == &gl->gl_holders &&
                             glops->go_lock) {
                                 spin_unlock(&gl->gl_lockref.lock);
@@ -723,23 +755,6 @@ out:
 }
 
 /**
- * find_first_holder - find the first "holder" gh
- * @gl: the glock
- */
-
-static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
-{
-        struct gfs2_holder *gh;
-
-        if (!list_empty(&gl->gl_holders)) {
-                gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-                        return gh;
-        }
-        return NULL;
-}
-
-/**
  * run_queue - do all outstanding tasks related to a glock
  * @gl: The glock in question
  * @nonblock: True if we must not block in run_queue
@@ -1354,8 +1369,12 @@ __acquires(&gl->gl_lockref.lock)
                         GLOCK_BUG_ON(gl, true);
 
         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-                if (test_bit(GLF_LOCK, &gl->gl_flags))
-                        try_futile = !may_grant(gl, gh);
+                if (test_bit(GLF_LOCK, &gl->gl_flags)) {
+                        struct gfs2_holder *first_gh;
+
+                        first_gh = find_first_holder(gl);
+                        try_futile = !may_grant(gl, first_gh, gh);
+                }
                 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
                         goto fail;
         }
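
As an aside for reviewers tracing the new logic: the holder-compatibility
rules that the reworked may_grant() encodes in its switch statement can be
modeled in a few lines of standalone C. The sketch below is illustrative
only; the enum, flag constant, and struct are local stand-ins rather than
the kernel's definitions, and it covers just the holder-vs-holder path
(the no-holder cases involving GL_EXACT and LM_FLAG_ANY are left out).

/*
 * Simplified userspace model of the compatibility check in the
 * patched may_grant().  All names are local stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

enum lock_state { ST_UNLOCKED, ST_SHARED, ST_DEFERRED, ST_EXCLUSIVE };

#define FLAG_NODE_SCOPE 0x1     /* holder agrees to share an EX lock */

struct holder {
        enum lock_state state;
        unsigned int flags;
};

/*
 * Mirrors the switch in the patched may_grant(): a new request is
 * compatible with an existing holder only if both are EX with
 * NODE_SCOPE set, or if both want the same SH/DF state.
 */
static bool compatible(const struct holder *cur, const struct holder *gh)
{
        switch (cur->state) {
        case ST_EXCLUSIVE:
                return gh->state == ST_EXCLUSIVE &&
                       (cur->flags & FLAG_NODE_SCOPE) &&
                       (gh->flags & FLAG_NODE_SCOPE);
        case ST_SHARED:
        case ST_DEFERRED:
                return gh->state == cur->state;
        default:
                return false;
        }
}

int main(void)
{
        struct holder ex_ns = { ST_EXCLUSIVE, FLAG_NODE_SCOPE };
        struct holder ex = { ST_EXCLUSIVE, 0 };
        struct holder sh = { ST_SHARED, 0 };

        printf("EX+NODE_SCOPE vs EX+NODE_SCOPE: %d\n", compatible(&ex_ns, &ex_ns)); /* 1 */
        printf("EX vs EX:                       %d\n", compatible(&ex, &ex));       /* 0 */
        printf("SH vs SH:                       %d\n", compatible(&sh, &sh));       /* 1 */
        return 0;
}

Compiled with any C99 compiler, the three probes print 1, 0, and 1: two
exclusive holders may coexist only when both carry the NODE_SCOPE flag,
which is exactly the special exception the comment in the patch describes.
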
Patches currently in stable-queue which might be from anand.jain@xxxxxxxxxx are

queue-5.15/gup-turn-fault_in_pages_-readable-writeable-into-fault_in_-readable-writeable.patch
queue-5.15/iov_iter-introduce-nofault-flag-to-disable-page-faults.patch
queue-5.15/gfs2-fix-mmap-page-fault-deadlocks-for-direct-i-o.patch
queue-5.15/iomap-support-partial-direct-i-o-on-user-copy-failures.patch
queue-5.15/mm-gup-make-fault_in_safe_writeable-use-fixup_user_fault.patch
queue-5.15/gfs2-add-wrapper-for-iomap_file_buffered_write.patch
queue-5.15/gup-introduce-foll_nofault-flag-to-disable-page-faults.patch
queue-5.15/iov_iter-introduce-fault_in_iov_iter_writeable.patch
queue-5.15/gfs2-fix-mmap-page-fault-deadlocks-for-buffered-i-o.patch
queue-5.15/btrfs-fallback-to-blocking-mode-when-doing-async-dio-over-multiple-extents.patch
queue-5.15/gfs2-clean-up-function-may_grant.patch
queue-5.15/btrfs-fix-deadlock-due-to-page-faults-during-direct-io-reads-and-writes.patch
queue-5.15/iov_iter-turn-iov_iter_fault_in_readable-into-fault_in_iov_iter_readable.patch
queue-5.15/gfs2-move-the-inode-glock-locking-to-gfs2_file_buffered_write.patch
queue-5.15/gfs2-introduce-flag-for-glock-holder-auto-demotion.patch
queue-5.15/iomap-fix-iomap_dio_rw-return-value-for-user-copies.patch
queue-5.15/gfs2-eliminate-ip-i_gh.patch
queue-5.15/iomap-add-done_before-argument-to-iomap_dio_rw.patch