This is a note to let you know that I've just added the patch titled

    gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async

to the 6.6-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    gfs2-replace-gfs2_glock_queue_put-with-gfs2_glock_pu.patch
and it can be found in the queue-6.6 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 13fc9bbf2185748af632ae9690eb542bbf5a88be
Author: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
Date:   Fri Mar 15 16:45:39 2024 +0100

    gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async

    [ Upstream commit ee2be7d7c7f32783f60ee5fe59b91548a4571f10 ]

    Function gfs2_glock_queue_put() puts a glock reference by enqueuing
    glock work instead of putting the reference directly.  This ensures
    that the operation won't sleep, but it is costly and really only
    necessary when putting the final glock reference.  Replace it with a
    new gfs2_glock_put_async() function that only queues glock work when
    putting the last glock reference.

    Signed-off-by: Andreas Gruenbacher <agruenba@xxxxxxxxxx>
    Stable-dep-of: 7c6f714d8847 ("gfs2: Fix unlinked inode cleanup")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 685e3ef9e9008..88ddc9828c6c0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -311,14 +311,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
-{
-	gfs2_glock_queue_work(gl, 0);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -333,6 +325,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	__gfs2_glock_put(gl);
 }
 
+/*
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on glock immediately unless it is the last
+ * reference.  Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	__gfs2_glock_queue_work(gl, 0);
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
 /**
  * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
@@ -2533,8 +2541,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 	if (gl) {
 		if (n == 0)
 			return;
-		if (!lockref_put_not_zero(&gl->gl_lockref))
-			gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 	for (;;) {
 		gl = rhashtable_walk_next(&gi->hti);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index f7ee9ca948eee..29fd58de0597d 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -186,7 +186,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   int create, struct gfs2_glock **glp);
 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
 void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_glock_queue_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
 
 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
 			struct gfs2_holder *gh,
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 767549066066c..2be5551241b3a 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -790,7 +790,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 {
 	if (atomic_dec_return(&gl->gl_revokes) == 0) {
 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 }
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 1200cb8059995..b37f8bd79286a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1054,7 +1054,7 @@ static int gfs2_drop_inode(struct inode *inode)
 
 		gfs2_glock_hold(gl);
 		if (!gfs2_queue_try_to_evict(gl))
-			gfs2_glock_queue_put(gl);
+			gfs2_glock_put_async(gl);
 		return 0;
 	}
 
@@ -1270,7 +1270,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 {
 	if (current->flags & PF_MEMALLOC)
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	else
 		gfs2_glock_put(gl);
 }
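As background on the pattern the new helper uses: gfs2_glock_put_async() relies on lockref_put_or_lock(), which decrements the reference count when more than one reference remains and otherwise returns with the lockref spinlock held, so only the final put is handed off to the glock workqueue. The standalone C sketch below mirrors that shape with a plain mutex and a hypothetical defer_final_put() stand-in for the queued work; it is an illustration only, not code from the patch, and all names in it (struct object, obj_put_async, defer_final_put) are made up.

/*
 * Illustrative sketch only -- not from the patch.  Names (struct object,
 * obj_put_async, defer_final_put) are hypothetical.
 */
#include <pthread.h>

struct object {
	pthread_mutex_t lock;
	int refcount;
};

/* Hypothetical stand-in for queuing work that performs the final put. */
static void defer_final_put(struct object *obj)
{
	/*
	 * A real implementation would queue work that drops the last
	 * reference and frees the object once it is safe to sleep.
	 */
	(void)obj;
}

/* Drop a reference without sleeping; defer only the final teardown. */
void obj_put_async(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);
	if (obj->refcount > 1) {
		/* Fast path: other references remain, just decrement. */
		obj->refcount--;
		pthread_mutex_unlock(&obj->lock);
		return;
	}
	/*
	 * Slow path: this is the last reference.  Queue the deferred put
	 * while still holding the lock (as the patch does with
	 * __gfs2_glock_queue_work()), then release the lock.
	 */
	defer_final_put(obj);
	pthread_mutex_unlock(&obj->lock);
}

This is why call sites that must not sleep, such as gfs2_glock_put_eventually() under PF_MEMALLOC, can use the async variant safely while the common case stays a cheap reference-count decrement.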