[RFC v3 PATCH] xfs: automatic relogging experiment

POC to automatically relog the quotaoff start intent. This approach
involves neither reservation stealing nor transaction rolling, so
deadlock avoidance is not guaranteed. The tradeoff is simplicity: an
approach that might be effective enough in practice.

Signed-off-by: Brian Foster <bfoster@xxxxxxxxxx>
---

Here's a quickly hacked up version of what I was rambling about in the
cover letter. I wanted to post this for comparison. As noted above, this
doesn't necessarily guarantee deadlock avoidance with transaction
rolling, etc., but it might be good enough in practice for the current
use cases (particularly with CIL context size fixes). Even if not,
there's a clear enough path to tracking relog reservation with a ticket
in the CIL context in a manner that is more conducive to batching. We
may also be able to union ->li_cb() with a ->li_relog() variant to
relog intent items the way dfops currently does for things like EFIs,
which don't support direct relogging of the same object.
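
To make that last idea slightly more concrete, a purely hypothetical
sketch (not part of this patch; the ->li_relog() signature is invented
here just for illustration) might look something like:

	struct xfs_log_item {
		/* ... existing fields ... */
		union {
			/* existing buffer iodone callback */
			void	(*li_cb)(struct xfs_buf *,
					 struct xfs_log_item *);
			/*
			 * Hypothetical: log a replacement intent in the
			 * given transaction, similar to how dfops relogs
			 * EFIs today.
			 */
			void	(*li_relog)(struct xfs_log_item *,
					    struct xfs_trans *);
		};
		/* ... */
	};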

Thoughts about using something like this as an intermediate solution,
provided it holds up against some stress testing?
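
For quick reference, the caller side of this POC (condensed from the
quotaoff hunks below; error handling elided) boils down to:

	/* when the quotaoff start intent is first logged */
	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);
	set_bit(XFS_LI_RELOG, &qoffi->qql_item.li_flags);

	/* before logging the end intent: stop relogging and drain the worker */
	clear_bit(XFS_LI_RELOG, &startqoff->qql_item.li_flags);
	xfs_log_force(mp, XFS_LOG_SYNC);
	flush_workqueue(xfs_discard_wq);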

Brian

 fs/xfs/xfs_log.c         |  1 +
 fs/xfs/xfs_log_cil.c     | 50 +++++++++++++++++++++++++++++++++++++++-
 fs/xfs/xfs_log_priv.h    |  2 ++
 fs/xfs/xfs_qm_syscalls.c |  6 +++++
 fs/xfs/xfs_trans.h       |  5 +++-
 5 files changed, 62 insertions(+), 2 deletions(-)

diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6a147c63a8a6..4fb3c3156ea2 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1086,6 +1086,7 @@ xfs_log_item_init(
 	INIT_LIST_HEAD(&item->li_cil);
 	INIT_LIST_HEAD(&item->li_bio_list);
 	INIT_LIST_HEAD(&item->li_trans);
+	INIT_LIST_HEAD(&item->li_ril);
 }
 
 /*
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 48435cf2aa16..c16ebc448a40 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -19,6 +19,44 @@
 
 struct workqueue_struct *xfs_discard_wq;
 
+static void
+xfs_relog_worker(
+	struct work_struct	*work)
+{
+	struct xfs_cil_ctx	*ctx = container_of(work, struct xfs_cil_ctx, relog_work);
+	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;
+	struct xfs_trans	*tp;
+	struct xfs_log_item	*lip, *lipp;
+	int			error;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+	ASSERT(!error);
+
+	list_for_each_entry_safe(lip, lipp, &ctx->relog_list, li_ril) {
+		list_del_init(&lip->li_ril);
+
+		if (!test_bit(XFS_LI_RELOG, &lip->li_flags))
+			continue;
+
+		xfs_trans_add_item(tp, lip);
+		set_bit(XFS_LI_DIRTY, &lip->li_flags);
+		tp->t_flags |= XFS_TRANS_DIRTY;
+	}
+
+	error = xfs_trans_commit(tp);
+	ASSERT(!error);
+
+	/* XXX */
+	kmem_free(ctx);
+}
+
+static void
+xfs_relog_queue(
+	struct xfs_cil_ctx	*ctx)
+{
+	queue_work(xfs_discard_wq, &ctx->relog_work);
+}
+
 /*
  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
  * recover, so we don't allow failure here. Also, we allocate in a context that
@@ -476,6 +514,9 @@ xlog_cil_insert_items(
 		 */
 		if (!list_is_last(&lip->li_cil, &cil->xc_cil))
 			list_move_tail(&lip->li_cil, &cil->xc_cil);
+
+		if (test_bit(XFS_LI_RELOG, &lip->li_flags))
+			list_move_tail(&lip->li_ril, &ctx->relog_list);
 	}
 
 	spin_unlock(&cil->xc_cil_lock);
@@ -605,7 +646,10 @@ xlog_cil_committed(
 
 	xlog_cil_free_logvec(ctx->lv_chain);
 
-	if (!list_empty(&ctx->busy_extents))
+	/* XXX: mutually exclusive w/ discard for POC to handle ctx freeing */
+	if (!list_empty(&ctx->relog_list))
+		xfs_relog_queue(ctx);
+	else if (!list_empty(&ctx->busy_extents))
 		xlog_discard_busy_extents(mp, ctx);
 	else
 		kmem_free(ctx);
@@ -746,8 +790,10 @@ xlog_cil_push(
 	 */
 	INIT_LIST_HEAD(&new_ctx->committing);
 	INIT_LIST_HEAD(&new_ctx->busy_extents);
+	INIT_LIST_HEAD(&new_ctx->relog_list);
 	new_ctx->sequence = ctx->sequence + 1;
 	new_ctx->cil = cil;
+	INIT_WORK(&new_ctx->relog_work, xfs_relog_worker);
 	cil->xc_ctx = new_ctx;
 
 	/*
@@ -1199,6 +1245,8 @@ xlog_cil_init(
 
 	INIT_LIST_HEAD(&ctx->committing);
 	INIT_LIST_HEAD(&ctx->busy_extents);
+	INIT_LIST_HEAD(&ctx->relog_list);
+	INIT_WORK(&ctx->relog_work, xfs_relog_worker);
 	ctx->sequence = 1;
 	ctx->cil = cil;
 	cil->xc_ctx = ctx;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b192c5a9f9fd..6fd7b7297bd3 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -243,6 +243,8 @@ struct xfs_cil_ctx {
 	struct list_head	iclog_entry;
 	struct list_head	committing;	/* ctx committing list */
 	struct work_struct	discard_endio_work;
+	struct list_head	relog_list;
+	struct work_struct	relog_work;
 };
 
 /*
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 1ea82764bf89..08b6180cb5a3 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -18,6 +18,7 @@
 #include "xfs_quota.h"
 #include "xfs_qm.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
 
 STATIC int
 xfs_qm_log_quotaoff(
@@ -37,6 +38,7 @@ xfs_qm_log_quotaoff(
 
 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
 	xfs_trans_log_quotaoff_item(tp, qoffi);
+	set_bit(XFS_LI_RELOG, &qoffi->qql_item.li_flags);
 
 	spin_lock(&mp->m_sb_lock);
 	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
@@ -69,6 +71,10 @@ xfs_qm_log_quotaoff_end(
 	int			error;
 	struct xfs_qoff_logitem	*qoffi;
 
+	clear_bit(XFS_LI_RELOG, &startqoff->qql_item.li_flags);
+	xfs_log_force(mp, XFS_LOG_SYNC);
+	flush_workqueue(xfs_discard_wq);
+
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
 	if (error)
 		return error;
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 64d7f171ebd3..e04033c29f0d 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -48,6 +48,7 @@ struct xfs_log_item {
 	struct xfs_log_vec		*li_lv;		/* active log vector */
 	struct xfs_log_vec		*li_lv_shadow;	/* standby vector */
 	xfs_lsn_t			li_seq;		/* CIL commit seq */
+	struct list_head		li_ril;
 };
 
 /*
@@ -59,12 +60,14 @@ struct xfs_log_item {
 #define	XFS_LI_ABORTED	1
 #define	XFS_LI_FAILED	2
 #define	XFS_LI_DIRTY	3	/* log item dirty in transaction */
+#define	XFS_LI_RELOG	4	/* automatic relogging */
 
 #define XFS_LI_FLAGS \
 	{ (1 << XFS_LI_IN_AIL),		"IN_AIL" }, \
 	{ (1 << XFS_LI_ABORTED),	"ABORTED" }, \
 	{ (1 << XFS_LI_FAILED),		"FAILED" }, \
-	{ (1 << XFS_LI_DIRTY),		"DIRTY" }
+	{ (1 << XFS_LI_DIRTY),		"DIRTY" }, \
+	{ (1 << XFS_LI_RELOG),		"RELOG" }
 
 struct xfs_item_ops {
 	unsigned flags;
-- 
2.20.1