From: Darrick J. Wong <djwong@xxxxxxxxxx>

When a writer thread executes a chain of log intent items for the
realtime volume, the ILOCKs taken during each step are for each rt
metadata file, not the entire rt volume itself.  Although scrub takes
all rt metadata ILOCKs, this isn't sufficient to guard against scrub
checking the rt volume while that writer thread is in the middle of
finishing a chain, because there is no higher-level locking primitive
guarding the realtime volume.

When there's a collision, cross-referencing between data structures
(e.g. rtrmapbt and rtrefcountbt) yields false corruption events; if
repair is running, this results in incorrect repairs, which is
catastrophic.

Fix this by adding to the realtime group structure the same drain that
we use to protect scrub against concurrent AG updates, but this time
for the realtime volume.

Signed-off-by: Darrick J. Wong <djwong@xxxxxxxxxx>
---
 include/xfs_mount.h  |    5 +++++
 libxfs/defer_item.c  |    9 ++++++++-
 libxfs/xfs_rtgroup.c |    3 +++
 libxfs/xfs_rtgroup.h |    9 +++++++++
 4 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/include/xfs_mount.h b/include/xfs_mount.h
index a4d0ba70e83..ca79c420afb 100644
--- a/include/xfs_mount.h
+++ b/include/xfs_mount.h
@@ -311,6 +311,11 @@ struct xfs_drain { /* empty */ };
 static inline void xfs_perag_bump_intents(struct xfs_perag *pag) { }
 static inline void xfs_perag_drop_intents(struct xfs_perag *pag) { }
 
+struct xfs_rtgroup;
+
+static inline void xfs_rtgroup_bump_intents(struct xfs_rtgroup *rtg) { }
+static inline void xfs_rtgroup_drop_intents(struct xfs_rtgroup *rtg) { }
+
 #define xfs_drain_free(dr)		((void)0)
 #define xfs_drain_init(dr)		((void)0)
 
diff --git a/libxfs/defer_item.c b/libxfs/defer_item.c
index 9a4196f7cc0..baf3b9e6204 100644
--- a/libxfs/defer_item.c
+++ b/libxfs/defer_item.c
@@ -91,6 +91,7 @@ xfs_extent_free_get_group(
 
 		rgno = xfs_rtb_to_rgno(mp, xefi->xefi_startblock);
 		xefi->xefi_rtg = xfs_rtgroup_get(mp, rgno);
+		xfs_rtgroup_bump_intents(xefi->xefi_rtg);
 		return;
 	}
 
@@ -105,6 +106,7 @@ xfs_extent_free_put_group(
 	struct xfs_extent_free_item	*xefi)
 {
 	if (xfs_efi_is_realtime(xefi)) {
+		xfs_rtgroup_drop_intents(xefi->xefi_rtg);
 		xfs_rtgroup_put(xefi->xefi_rtg);
 		return;
 	}
@@ -275,6 +277,7 @@ xfs_rmap_update_get_group(
 
 		rgno = xfs_rtb_to_rgno(mp, ri->ri_bmap.br_startblock);
 		ri->ri_rtg = xfs_rtgroup_get(mp, rgno);
+		xfs_rtgroup_bump_intents(ri->ri_rtg);
 		return;
 	}
 
@@ -289,6 +292,7 @@ xfs_rmap_update_put_group(
 	struct xfs_rmap_intent	*ri)
 {
 	if (ri->ri_realtime) {
+		xfs_rtgroup_drop_intents(ri->ri_rtg);
 		xfs_rtgroup_put(ri->ri_rtg);
 		return;
 	}
@@ -522,6 +526,7 @@ xfs_bmap_update_get_group(
 
 		rgno = xfs_rtb_to_rgno(mp, bi->bi_bmap.br_startblock);
 		bi->bi_rtg = xfs_rtgroup_get(mp, rgno);
+		xfs_rtgroup_bump_intents(bi->bi_rtg);
 	} else {
 		bi->bi_rtg = NULL;
 	}
@@ -548,8 +553,10 @@ xfs_bmap_update_put_group(
 	struct xfs_bmap_intent		*bi)
 {
 	if (xfs_ifork_is_realtime(bi->bi_owner, bi->bi_whichfork)) {
-		if (xfs_has_rtgroups(bi->bi_owner->i_mount))
+		if (xfs_has_rtgroups(bi->bi_owner->i_mount)) {
+			xfs_rtgroup_drop_intents(bi->bi_rtg);
 			xfs_rtgroup_put(bi->bi_rtg);
+		}
 		return;
 	}
 
diff --git a/libxfs/xfs_rtgroup.c b/libxfs/xfs_rtgroup.c
index 8018cd02e70..8c41869a61a 100644
--- a/libxfs/xfs_rtgroup.c
+++ b/libxfs/xfs_rtgroup.c
@@ -129,6 +129,8 @@ xfs_initialize_rtgroups(
 #ifdef __KERNEL__
 		/* Place kernel structure only init below this point. */
 		spin_lock_init(&rtg->rtg_state_lock);
+		xfs_drain_init(&rtg->rtg_intents);
+
 #endif /* __KERNEL__ */
 
 		/* first new rtg is fully initialized */
@@ -180,6 +182,7 @@ xfs_free_rtgroups(
 	spin_unlock(&mp->m_rtgroup_lock);
 	ASSERT(rtg);
 	XFS_IS_CORRUPT(rtg->rtg_mount, atomic_read(&rtg->rtg_ref) != 0);
+	xfs_drain_free(&rtg->rtg_intents);
 	call_rcu(&rtg->rcu_head, __xfs_free_rtgroups);
 }
 
diff --git a/libxfs/xfs_rtgroup.h b/libxfs/xfs_rtgroup.h
index 3230dd03d8f..1d41a2cac34 100644
--- a/libxfs/xfs_rtgroup.h
+++ b/libxfs/xfs_rtgroup.h
@@ -37,6 +37,15 @@ struct xfs_rtgroup {
 #ifdef __KERNEL__
 	/* -- kernel only structures below this line -- */
 	spinlock_t		rtg_state_lock;
+
+	/*
+	 * We use xfs_drain to track the number of deferred log intent items
+	 * that have been queued (but not yet processed) so that waiters (e.g.
+	 * scrub) will not lock resources when other threads are in the middle
+	 * of processing a chain of intent items only to find momentary
+	 * inconsistencies.
+	 */
+	struct xfs_drain	rtg_intents;
 #endif /* __KERNEL__ */
 };
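
For reviewers who don't have the earlier perag drain patches in front
of them, here is a minimal userspace sketch of the bump/drop/wait
pattern that rtg_intents follows.  This is an illustration only, under
assumed names (drain_bump, drain_drop, and drain_wait are
hypothetical), built on pthreads; the kernel primitive uses an atomic
counter and a wait queue rather than a mutex/condvar pair.

/*
 * Hypothetical userspace model of the intent drain.  Not the kernel
 * implementation; for illustration only.
 */
#include <pthread.h>

struct drain {
	pthread_mutex_t	lock;
	pthread_cond_t	empty;	/* broadcast when count reaches zero */
	unsigned int	count;	/* intents queued but not yet finished */
};

static void drain_init(struct drain *dr)
{
	pthread_mutex_init(&dr->lock, NULL);
	pthread_cond_init(&dr->empty, NULL);
	dr->count = 0;
}

/* Writer side: queueing a deferred intent item bumps the count... */
static void drain_bump(struct drain *dr)
{
	pthread_mutex_lock(&dr->lock);
	dr->count++;
	pthread_mutex_unlock(&dr->lock);
}

/* ...and finishing (or cancelling) that item drops it again. */
static void drain_drop(struct drain *dr)
{
	pthread_mutex_lock(&dr->lock);
	if (--dr->count == 0)
		pthread_cond_broadcast(&dr->empty);
	pthread_mutex_unlock(&dr->lock);
}

/* Scrub side: sleep until no intent chain touches this rtgroup. */
static void drain_wait(struct drain *dr)
{
	pthread_mutex_lock(&dr->lock);
	while (dr->count > 0)
		pthread_cond_wait(&dr->empty, &dr->lock);
	pthread_mutex_unlock(&dr->lock);
}

The property scrub needs is visible in drain_wait(): the caller blocks
until every bump has been matched by a drop, i.e. until no writer is
partway through an intent chain, before it starts cross-referencing
rtrmapbt against rtrefcountbt.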