[PATCH 2/4] xfs_scrub: prepare phase3 for per-inogrp worker threads

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Darrick J. Wong <djwong@xxxxxxxxxx>

In the next patch, we're going to rewrite scrub_scan_all_inodes to
schedule per-inogrp workqueue items that will run the iterator function.
In other words, the worker threads in phase 3 will soon cease to be
per-AG threads.

To prepare for this, we must modify phase 3 so that any writes to shared
state are protected by the appropriate per-AG locks.  As far as I can
tell, the only updates to shared state are the per-AG action lists, so
create some per-AG locks for phase 3 and create locked wrappers for the
action_list_* functions if we find things to repair.

Signed-off-by: Darrick J. Wong <djwong@xxxxxxxxxx>
---
 scrub/phase3.c |   44 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 39 insertions(+), 5 deletions(-)


diff --git a/scrub/phase3.c b/scrub/phase3.c
index d659a779..65e903f2 100644
--- a/scrub/phase3.c
+++ b/scrub/phase3.c
@@ -26,6 +26,9 @@ struct scrub_inode_ctx {
 	/* Number of inodes scanned. */
 	struct ptcounter	*icount;
 
+	/* per-AG locks to protect the repair lists */
+	pthread_mutex_t		*locks;
+
 	/* Set to true to abort all threads. */
 	bool			aborted;
 
@@ -48,6 +51,24 @@ report_close_error(
 	str_errno(ctx, descr);
 }
 
+/*
+ * Defer all the repairs until phase 4, being careful about locking since the
+ * inode scrub threads are not per-AG.
+ */
+static void
+defer_inode_repair(
+	struct scrub_inode_ctx	*ictx,
+	xfs_agnumber_t		agno,
+	struct action_list	*alist)
+{
+	if (alist->nr == 0)
+		return;
+
+	pthread_mutex_lock(&ictx->locks[agno]);
+	action_list_defer(ictx->ctx, agno, alist);
+	pthread_mutex_unlock(&ictx->locks[agno]);
+}
+
 /* Run repair actions now and defer unfinished items for later. */
 static int
 try_inode_repair(
@@ -71,7 +92,7 @@ try_inode_repair(
 	if (ret)
 		return ret;
 
-	action_list_defer(ictx->ctx, agno, alist);
+	defer_inode_repair(ictx, agno, alist);
 	return 0;
 }
 
@@ -184,7 +205,7 @@ scrub_inode(
 	progress_add(1);
 
 	if (!error && !ictx->aborted)
-		action_list_defer(ctx, agno, &alist);
+		defer_inode_repair(ictx, agno, &alist);
 
 	if (fd >= 0) {
 		int	err2;
@@ -217,12 +238,21 @@ phase3_func(
 		return err;
 	}
 
+	ictx.locks = calloc(ctx->mnt.fsgeom.agcount, sizeof(pthread_mutex_t));
+	if (!ictx.locks) {
+		str_errno(ctx, _("creating per-AG repair list locks"));
+		err = ENOMEM;
+		goto out_ptcounter;
+	}
+
 	/*
 	 * If we already have ag/fs metadata to repair from previous phases,
 	 * we would rather not try to repair file metadata until we've tried
 	 * to repair the space metadata.
 	 */
 	for (agno = 0; agno < ctx->mnt.fsgeom.agcount; agno++) {
+		pthread_mutex_init(&ictx.locks[agno], NULL);
+
 		if (!action_list_empty(&ctx->action_lists[agno]))
 			ictx.always_defer_repairs = true;
 	}
@@ -231,17 +261,21 @@ phase3_func(
 	if (!err && ictx.aborted)
 		err = ECANCELED;
 	if (err)
-		goto free;
+		goto out_locks;
 
 	scrub_report_preen_triggers(ctx);
 	err = ptcounter_value(ictx.icount, &val);
 	if (err) {
 		str_liberror(ctx, err, _("summing scanned inode counter"));
-		return err;
+		goto out_locks;
 	}
 
 	ctx->inodes_checked = val;
-free:
+out_locks:
+	for (agno = 0; agno < ctx->mnt.fsgeom.agcount; agno++)
+		pthread_mutex_destroy(&ictx.locks[agno]);
+	free(ictx.locks);
+out_ptcounter:
 	ptcounter_free(ictx.icount);
 	return err;
 }




[Index of Archives]     [XFS Filesystem Development (older mail)]     [Linux Filesystem Development]     [Linux Audio Users]     [Yosemite Trails]     [Linux Kernel]     [Linux RAID]     [Linux SCSI]


  Powered by Linux