[PATCH 04/13] fsverity: add per-sb workqueue for post read processing

From: Andrey Albershteyn <aalbersh@xxxxxxxxxx>

For XFS, fsverity's global workqueue is not suitable for two reasons:

1. High priority workqueues are used within XFS to ensure that data
   IO completion cannot stall processing of journal IO completions.
   Hence using a WQ_HIGHPRI workqueue directly in the user data IO
   path is a potential filesystem livelock/deadlock vector.

2. The fsverity workqueue is global - it creates a cross-filesystem
   contention point.

This patch adds a per-filesystem, per-CPU workqueue for fsverity work.
This allows iomap to queue verification work from the read path on BIO
completion.
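
By way of illustration, a filesystem can lazily set up the workqueue before
it submits a read of verity-protected data; fsverity_init_verify_wq() is a
no-op once the workqueue exists, and the cmpxchg() in
__fsverity_init_verify_wq() sorts out concurrent callers.  A rough sketch
(example_prepare_verity_read() is a made-up name):

	static int example_prepare_verity_read(struct inode *inode)
	{
		if (!fsverity_active(inode))
			return 0;

		/* Creates sb->s_verify_wq on first use; safe to race. */
		return fsverity_init_verify_wq(inode->i_sb);
	}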

Signed-off-by: Andrey Albershteyn <aalbersh@xxxxxxxxxx>
[djwong: make it clearer that this workqueue is for verity]
Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>
Signed-off-by: Darrick J. Wong <djwong@xxxxxxxxxx>
---
 fs/super.c               |    7 +++++++
 fs/verity/verify.c       |   20 ++++++++++++++++++++
 include/linux/fs.h       |    2 ++
 include/linux/fsverity.h |   13 +++++++++++++
 4 files changed, 42 insertions(+)


diff --git a/fs/super.c b/fs/super.c
index 71d9779c42b10..aaa75131f6795 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -637,6 +637,13 @@ void generic_shutdown_super(struct super_block *sb)
 			sb->s_dio_done_wq = NULL;
 		}
 
+#ifdef CONFIG_FS_VERITY
+		if (sb->s_verify_wq) {
+			destroy_workqueue(sb->s_verify_wq);
+			sb->s_verify_wq = NULL;
+		}
+#endif
+
 		if (sop->put_super)
 			sop->put_super(sb);
 
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 0b5e11073e883..0417862d5bd4a 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -342,6 +342,26 @@ void fsverity_verify_bio(struct bio *bio)
 EXPORT_SYMBOL_GPL(fsverity_verify_bio);
 #endif /* CONFIG_BLOCK */
 
+int __fsverity_init_verify_wq(struct super_block *sb)
+{
+	struct workqueue_struct *wq, *old;
+
+	wq = alloc_workqueue("fsverity/%s", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0,
+			sb->s_id);
+	if (!wq)
+		return -ENOMEM;
+
+	/*
+	 * This has to be atomic as readaheads can race to create the
+	 * workqueue.  If someone created the workqueue before us, we drop ours.
+	 */
+	old = cmpxchg(&sb->s_verify_wq, NULL, wq);
+	if (old)
+		destroy_workqueue(wq);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__fsverity_init_verify_wq);
+
 /**
  * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
  * @work: the work to enqueue
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5d1c33573767f..2fc3c2d218ff8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1230,6 +1230,8 @@ struct super_block {
 #endif
 #ifdef CONFIG_FS_VERITY
 	const struct fsverity_operations *s_vop;
+	/* Workqueue for post-read verification */
+	struct workqueue_struct *s_verify_wq;
 #endif
 #if IS_ENABLED(CONFIG_UNICODE)
 	struct unicode_map *s_encoding;
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index c3478efd67d62..495708fb1f26a 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -252,6 +252,14 @@ bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
 void fsverity_verify_bio(struct bio *bio);
 void fsverity_enqueue_verify_work(struct work_struct *work);
 
+int __fsverity_init_verify_wq(struct super_block *sb);
+static inline int fsverity_init_verify_wq(struct super_block *sb)
+{
+	if (sb->s_verify_wq)
+		return 0;
+	return __fsverity_init_verify_wq(sb);
+}
+
 #else /* !CONFIG_FS_VERITY */
 
 static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
@@ -329,6 +337,11 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
 	WARN_ON_ONCE(1);
 }
 
+static inline int fsverity_init_verify_wq(struct super_block *sb)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif	/* !CONFIG_FS_VERITY */
 
 static inline bool fsverity_verify_folio(struct folio *folio)
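
For reference, the intended consumer of sb->s_verify_wq looks roughly like
the sketch below; the real iomap wiring comes later in the series, and
example_read_ctx, example_read_endio(), example_verify_work() and
example_finish_read() are made-up names.  It assumes CONFIG_FS_VERITY=y,
since s_verify_wq only exists in that configuration.

	#include <linux/bio.h>
	#include <linux/fsverity.h>
	#include <linux/workqueue.h>

	struct example_read_ctx {
		struct inode		*inode;
		struct bio		*bio;
		struct work_struct	work;
	};

	/* Unlocks folios and frees ctx; body omitted from the sketch. */
	static void example_finish_read(struct example_read_ctx *ctx);

	/* Runs on sb->s_verify_wq rather than fsverity's global workqueue. */
	static void example_verify_work(struct work_struct *work)
	{
		struct example_read_ctx *ctx =
			container_of(work, struct example_read_ctx, work);

		fsverity_verify_bio(ctx->bio);	/* verify the data blocks */
		example_finish_read(ctx);
	}

	/* BIO completion: defer verification out of the IO completion path. */
	static void example_read_endio(struct bio *bio)
	{
		struct example_read_ctx *ctx = bio->bi_private;

		if (!bio->bi_status && fsverity_active(ctx->inode)) {
			INIT_WORK(&ctx->work, example_verify_work);
			queue_work(ctx->inode->i_sb->s_verify_wq, &ctx->work);
			return;
		}
		example_finish_read(ctx);
	}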
