This is a note to let you know that I've just added the patch titled

    gfs2: finish_xmote cleanup

to the 6.8-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     gfs2-finish_xmote-cleanup.patch
and it can be found in the queue-6.8 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 34821d884eeeb1694b55addf75a4340d7a00325d
Author: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
Date:   Fri Apr 12 19:16:58 2024 +0200

    gfs2: finish_xmote cleanup

    [ Upstream commit 1cd28e15864054f3c48baee9eecda1c0441c48ac ]

    Currently, function finish_xmote() takes and releases the glock
    spinlock.  However, all of its callers immediately take that spinlock
    again, so it makes more sense to take the spinlock before calling
    finish_xmote() in the first place.

    With that, thaw_glock() is the only place that sets the GLF_HAVE_REPLY
    flag outside of the glock spinlock, but it also takes that spinlock
    immediately thereafter.  Change that to set the bit while the spinlock
    is already held.

    This allows us to switch from test_and_clear_bit() to test_bit() and
    clear_bit() in glock_work_func().

    Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
    Stable-dep-of: 9947a06d29c0 ("gfs2: do_xmote fixes")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
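The pattern the commit describes is a common one: a callee that takes and
drops a lock internally gets that lock hoisted into its callers, so each
caller can keep a single critical section across the call and the follow-up
work. Here is a minimal, self-contained userspace sketch of that pattern
(pthread-based, with illustrative names such as finish() and work(); this
is not gfs2 code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int state;

/* After the refactor the callee no longer locks; callers hold 'lock'. */
static void finish(int reply)
{
        state = reply;          /* protected by the caller's lock */
}

static void work(int reply)
{
        pthread_mutex_lock(&lock);
        finish(reply);          /* was: lock, update, unlock, then re-lock */
        state += 1;             /* follow-up work, same critical section */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        work(3);
        printf("state = %d\n", state);
        return 0;
}

The same reasoning applies to each finish_xmote() call site in the diff
below: the caller now wraps the call and the queue-work step in a single
spin_lock/spin_unlock pair.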
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1bf0d751ece0a..0c719fcd4fbc5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -617,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         struct gfs2_holder *gh;
         unsigned state = ret & LM_OUT_ST_MASK;
 
-        spin_lock(&gl->gl_lockref.lock);
         trace_gfs2_glock_state_change(gl, state);
         state_change(gl, state);
         gh = find_first_waiter(gl);
@@ -665,7 +664,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
                                gl->gl_target, state);
                         GLOCK_BUG_ON(gl, 1);
                 }
-                spin_unlock(&gl->gl_lockref.lock);
                 return;
         }
 
@@ -688,7 +686,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         }
 out:
         clear_bit(GLF_LOCK, &gl->gl_flags);
-        spin_unlock(&gl->gl_lockref.lock);
 }
 
 static bool is_system_glock(struct gfs2_glock *gl)
@@ -835,15 +832,19 @@ __acquires(&gl->gl_lockref.lock)
                 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                     target == LM_ST_UNLOCKED &&
                     test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+                        spin_lock(&gl->gl_lockref.lock);
                         finish_xmote(gl, target);
-                        gfs2_glock_queue_work(gl, 0);
+                        __gfs2_glock_queue_work(gl, 0);
+                        spin_unlock(&gl->gl_lockref.lock);
                 } else if (ret) {
                         fs_err(sdp, "lm_lock ret %d\n", ret);
                         GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
                 }
         } else { /* lock_nolock */
+                spin_lock(&gl->gl_lockref.lock);
                 finish_xmote(gl, target);
-                gfs2_glock_queue_work(gl, 0);
+                __gfs2_glock_queue_work(gl, 0);
+                spin_unlock(&gl->gl_lockref.lock);
         }
 out:
         spin_lock(&gl->gl_lockref.lock);
@@ -1099,11 +1100,12 @@ static void glock_work_func(struct work_struct *work)
         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
         unsigned int drop_refs = 1;
 
-        if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+        spin_lock(&gl->gl_lockref.lock);
+        if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+                clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                 finish_xmote(gl, gl->gl_reply);
                 drop_refs++;
         }
-        spin_lock(&gl->gl_lockref.lock);
         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
             gl->gl_state != LM_ST_UNLOCKED &&
             gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2176,8 +2178,11 @@ static void thaw_glock(struct gfs2_glock *gl)
                 return;
         if (!lockref_get_not_dead(&gl->gl_lockref))
                 return;
+
+        spin_lock(&gl->gl_lockref.lock);
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-        gfs2_glock_queue_work(gl, 0);
+        __gfs2_glock_queue_work(gl, 0);
+        spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
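The glock_work_func() hunk above also shows why the atomic
test_and_clear_bit() can be relaxed: once every setter and tester of the
flag runs under the glock spinlock, a plain test followed by a plain clear
inside the critical section is equivalent. A userspace analogue
(illustrative names, not kernel code):

#include <pthread.h>
#include <stdio.h>

#define REPLY_PENDING   (1u << 0)

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int flags = REPLY_PENDING;

static void work(void)
{
        pthread_mutex_lock(&lock);
        if (flags & REPLY_PENDING) {            /* test_bit() */
                flags &= ~REPLY_PENDING;        /* clear_bit() */
                puts("handled pending reply");  /* finish_xmote() stand-in */
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        work();
        return 0;
}

The switch from gfs2_glock_queue_work() to __gfs2_glock_queue_work() in the
same hunks fits the usual kernel convention that the underscore-prefixed
variant is the one to call with the lock already held.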