Pass the first current glock holder into function may_grant and
deobfuscate the logic there.

While at it, switch from BUG_ON to GLOCK_BUG_ON in may_grant. To make
that build cleanly, de-constify the may_grant arguments.

We're now using function find_first_holder in do_promote, so move the
function's definition above do_promote.

Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
---
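
Note for reviewers, not part of the patch: below is a minimal,
self-contained userspace sketch of the holder-compatibility rules that
the reworked may_grant() encodes, in case it helps to poke at the logic
outside the kernel. The enum, flag bits, struct holder, and
may_grant_model() names are simplified stand-ins invented for
illustration; they are not the kernel's LM_ST_* states, LM_FLAG_* and
GL_* flags, or struct gfs2_holder. Compiled with gcc and run, it should
print 1 (two NODE_SCOPE EX holders are compatible) and then 0 (an SH
request next to an EX holder is not).

/*
 * Standalone model of the compatibility checks in the new may_grant():
 * if there is a current holder, compatibility is decided against that
 * holder; otherwise it is decided against the glock's own state.
 * All names here are illustrative stand-ins, not kernel identifiers.
 */
#include <stdbool.h>
#include <stdio.h>

enum lock_state { ST_UNLOCKED, ST_EXCLUSIVE, ST_DEFERRED, ST_SHARED };

#define FLAG_NODE_SCOPE	0x1	/* stand-in for LM_FLAG_NODE_SCOPE */
#define FLAG_ANY	0x2	/* stand-in for LM_FLAG_ANY */
#define FLAG_EXACT	0x4	/* stand-in for GL_EXACT */

struct holder {
	enum lock_state state;
	unsigned int flags;
};

static bool may_grant_model(enum lock_state gl_state,
			    const struct holder *current_gh,
			    const struct holder *gh)
{
	if (current_gh) {
		switch (current_gh->state) {
		case ST_EXCLUSIVE:
			/* EX is only shared among NODE_SCOPE holders. */
			return gh->state == ST_EXCLUSIVE &&
			       (current_gh->flags & FLAG_NODE_SCOPE) &&
			       (gh->flags & FLAG_NODE_SCOPE);
		case ST_SHARED:
		case ST_DEFERRED:
			return gh->state == current_gh->state;
		default:
			return false;
		}
	}

	if (gl_state == gh->state)
		return true;
	if (gh->flags & FLAG_EXACT)
		return false;
	if (gl_state == ST_EXCLUSIVE)
		return gh->state == ST_SHARED || gh->state == ST_DEFERRED;
	if (gh->flags & FLAG_ANY)
		return gl_state != ST_UNLOCKED;
	return false;
}

int main(void)
{
	struct holder ex_ns = { ST_EXCLUSIVE, FLAG_NODE_SCOPE };
	struct holder sh = { ST_SHARED, 0 };

	/* Two NODE_SCOPE EX holders are compatible ... */
	printf("%d\n", may_grant_model(ST_EXCLUSIVE, &ex_ns, &ex_ns));
	/* ... but an SH request next to an EX holder is not. */
	printf("%d\n", may_grant_model(ST_EXCLUSIVE, &ex_ns, &sh));
	return 0;
}
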
 fs/gfs2/glock.c | 119 ++++++++++++++++++++++++++++--------------------
 1 file changed, 69 insertions(+), 50 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index e0eaa9cf9fb6..bffd9c20f2de 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -301,46 +301,59 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 }
 
 /**
- * may_grant - check if its ok to grant a new lock
+ * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
+ * @current_gh: One of the current holders of @gl
  * @gh: The lock request which we wish to grant
  *
- * Returns: true if its ok to grant the lock
+ * With our current compatibility rules, if a glock has one or more active
+ * holders (HIF_HOLDER flag set), any of those holders can be passed in as
+ * @current_gh; they are all the same as far as compatibility with the new @gh
+ * goes.
+ *
+ * Returns true if it's ok to grant the lock.
  */
-static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
-{
-	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
+static inline bool may_grant(struct gfs2_glock *gl,
+			     struct gfs2_holder *current_gh,
+			     struct gfs2_holder *gh)
+{
+	if (current_gh) {
+		GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags));
+
+		switch(current_gh->gh_state) {
+		case LM_ST_EXCLUSIVE:
+			/*
+			 * Here we make a special exception to grant holders
+			 * who agree to share the EX lock with other holders
+			 * who also have the bit set. If the original holder
+			 * has the LM_FLAG_NODE_SCOPE bit set, we grant more
+			 * holders with the bit set.
+			 */
+			return gh->gh_state == LM_ST_EXCLUSIVE &&
+			       (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
+			       (gh->gh_flags & LM_FLAG_NODE_SCOPE);
 
-	if (gh != gh_head) {
-		/**
-		 * Here we make a special exception to grant holders who agree
-		 * to share the EX lock with other holders who also have the
-		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE bit
-		 * is set, we grant more holders with the bit set.
-		 */
-		if (gh_head->gh_state == LM_ST_EXCLUSIVE &&
-		    (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) &&
-		    gh->gh_state == LM_ST_EXCLUSIVE &&
-		    (gh->gh_flags & LM_FLAG_NODE_SCOPE))
-			return 1;
-		if ((gh->gh_state == LM_ST_EXCLUSIVE ||
-		     gh_head->gh_state == LM_ST_EXCLUSIVE))
-			return 0;
+		case LM_ST_SHARED:
+		case LM_ST_DEFERRED:
+			return gh->gh_state == current_gh->gh_state;
+
+		default:
+			return false;
+		}
 	}
+
 	if (gl->gl_state == gh->gh_state)
-		return 1;
+		return true;
 	if (gh->gh_flags & GL_EXACT)
-		return 0;
+		return false;
 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
-		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
-			return 1;
-		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
-			return 1;
+		return gh->gh_state == LM_ST_SHARED ||
+		       gh->gh_state == LM_ST_DEFERRED;
 	}
-	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
-		return 1;
-	return 0;
+	if (gh->gh_flags & LM_FLAG_ANY)
+		return gl->gl_state != LM_ST_UNLOCKED;
+	return false;
 }
 
 static void gfs2_holder_wake(struct gfs2_holder *gh)
@@ -380,6 +393,24 @@ static void do_error(struct gfs2_glock *gl, const int ret)
 	}
 }
 
+/**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+	struct gfs2_holder *gh;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
+				      gh_list);
+		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+			return gh;
+	}
+	return NULL;
+}
+
 /**
  * do_promote - promote as many requests as possible on the current queue
  * @gl: The glock
@@ -393,14 +424,15 @@ __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
-	struct gfs2_holder *gh, *tmp;
+	struct gfs2_holder *gh, *tmp, *first_gh;
 	int ret;
 
 restart:
+	first_gh = find_first_holder(gl);
 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
 			continue;
-		if (may_grant(gl, gh)) {
+		if (may_grant(gl, first_gh, gh)) {
 			if (gh->gh_list.prev == &gl->gl_holders &&
 			    glops->go_lock) {
 				spin_unlock(&gl->gl_lockref.lock);
@@ -722,23 +754,6 @@ __acquires(&gl->gl_lockref.lock)
 	spin_lock(&gl->gl_lockref.lock);
 }
 
-/**
- * find_first_holder - find the first "holder" gh
- * @gl: the glock
- */
-
-static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
-{
-	struct gfs2_holder *gh;
-
-	if (!list_empty(&gl->gl_holders)) {
-		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
-			return gh;
-	}
-	return NULL;
-}
-
 /**
  * run_queue - do all outstanding tasks related to a glock
  * @gl: The glock in question
@@ -1354,8 +1369,12 @@ __acquires(&gl->gl_lockref.lock)
 		GLOCK_BUG_ON(gl, true);
 
 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
-		if (test_bit(GLF_LOCK, &gl->gl_flags))
-			try_futile = !may_grant(gl, gh);
+		if (test_bit(GLF_LOCK, &gl->gl_flags)) {
+			struct gfs2_holder *first_gh;
+
+			first_gh = find_first_holder(gl);
+			try_futile = !may_grant(gl, first_gh, gh);
+		}
 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
 			goto fail;
 	}
-- 
2.31.1