This is a note to let you know that I've just added the patch titled

    gfs2: do_xmote fixes

to the 6.6-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     gfs2-do_xmote-fixes.patch
and it can be found in the queue-6.6 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 665e9792edd34e17fa045a903379023f02f7b21e
Author: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
Date:   Mon Apr 15 11:23:04 2024 +0200

    gfs2: do_xmote fixes

    [ Upstream commit 9947a06d29c0a30da88cdc6376ca5fd87083e130 ]

    Function do_xmote() is called with the glock spinlock held.  Commit
    86934198eefa added a 'goto skip_inval' statement at the beginning of
    the function to further below where the glock spinlock is expected not
    to be held anymore.  Then it added code there that requires the glock
    spinlock to be held.  This doesn't make sense; fix this up by dropping
    and retaking the spinlock where needed.

    In addition, when ->lm_lock() returned an error, do_xmote() didn't
    fail the locking operation, and simply left the glock hanging; fix
    that as well.  (This is a much older error.)

    Fixes: 86934198eefa ("gfs2: Clear flags when withdraw prevents xmote")
    Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 009a6a6312c2f..685e3ef9e9008 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -696,6 +696,7 @@ __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
 
@@ -724,6 +725,9 @@ __acquires(&gl->gl_lockref.lock)
 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
+	if (!glops->go_inval && !glops->go_sync)
+		goto skip_inval;
+
 	spin_unlock(&gl->gl_lockref.lock);
 	if (glops->go_sync) {
 		ret = glops->go_sync(gl);
@@ -736,6 +740,7 @@ __acquires(&gl->gl_lockref.lock)
 				fs_err(sdp, "Error %d syncing glock \n", ret);
 				gfs2_dump_glock(NULL, gl, true);
 			}
+			spin_lock(&gl->gl_lockref.lock);
 			goto skip_inval;
 		}
 	}
@@ -756,9 +761,10 @@ __acquires(&gl->gl_lockref.lock)
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 	}
+	spin_lock(&gl->gl_lockref.lock);
 
 skip_inval:
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
 	/*
 	 * Check for an error encountered since we called go_sync and go_inval.
 	 * If so, we can't withdraw from the glock code because the withdraw
@@ -800,37 +806,37 @@ __acquires(&gl->gl_lockref.lock)
 			 */
 			clear_bit(GLF_LOCK, &gl->gl_flags);
 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
-			goto out;
+			__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+			return;
 		} else {
 			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		}
 	}
 
-	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
-		struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+	if (ls->ls_ops->lm_lock) {
+		spin_unlock(&gl->gl_lockref.lock);
+		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+		spin_lock(&gl->gl_lockref.lock);
 
-		/* lock_dlm */
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 		    target == LM_ST_UNLOCKED &&
 		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
-			spin_lock(&gl->gl_lockref.lock);
-			finish_xmote(gl, target);
-			__gfs2_glock_queue_work(gl, 0);
-			spin_unlock(&gl->gl_lockref.lock);
+			/*
+			 * The lockspace has been released and the lock has
+			 * been unlocked implicitly.
+			 */
 		} else if (ret) {
 			fs_err(sdp, "lm_lock ret %d\n", ret);
-			GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+			target = gl->gl_state | LM_OUT_ERROR;
+		} else {
+			/* The operation will be completed asynchronously. */
+			return;
 		}
-	} else { /* lock_nolock */
-		spin_lock(&gl->gl_lockref.lock);
-		finish_xmote(gl, target);
-		__gfs2_glock_queue_work(gl, 0);
-		spin_unlock(&gl->gl_lockref.lock);
 	}
-out:
-	spin_lock(&gl->gl_lockref.lock);
+
+	/* Complete the operation now. */
+	finish_xmote(gl, target);
+	__gfs2_glock_queue_work(gl, 0);
 }
 
 /**
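
As a side note for anyone reviewing the backport: the core of the fix is the
usual pattern of taking an extra reference while still holding the lock,
dropping the lock around a call that must not run under it, retaking the lock
afterwards, and failing the operation instead of leaving the object hanging
when that call returns an error.  The following is a minimal, self-contained
userspace sketch of that pattern only; the names (struct obj, obj_xmote,
do_slow_op) and the pthread mutex standing in for the glock spinlock are made
up for illustration and are not part of the patch.

/* Illustrative sketch only; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;	/* stands in for gl->gl_lockref.lock */
	int refcount;		/* stands in for gl->gl_lockref.count */
	int state;
};

/* Must NOT be called with obj->lock held (it may block). */
static int do_slow_op(struct obj *o)
{
	(void)o;
	return 0;		/* pretend the backend call succeeded */
}

/* Called with obj->lock held; returns with obj->lock held. */
static void obj_xmote(struct obj *o, int target)
{
	int ret;

	o->refcount++;			/* keep the object alive */

	pthread_mutex_unlock(&o->lock);	/* drop before the blocking call */
	ret = do_slow_op(o);
	pthread_mutex_lock(&o->lock);	/* retake before touching state */

	if (ret)			/* fail the operation rather than */
		target = -1;		/* leaving the object hanging */
	o->state = target;
}

int main(void)
{
	struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 1 };

	pthread_mutex_lock(&o.lock);
	obj_xmote(&o, 2);
	pthread_mutex_unlock(&o.lock);
	printf("state=%d refcount=%d\n", o.state, o.refcount);
	return 0;
}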