[RFC PATCH 4/5] mm: add support for DMA folio migration

From: Mike Day <michael.day@xxxxxxx>

DMA drivers should implement the following callbacks to enable folio
migration offloading (illustrative sketches follow below):

migrate_dma() - Takes the lists of src and dst folios undergoing
migration and transfers the page contents from each src folio to its
corresponding dst folio.

can_migrate_dma() - Performs the necessary checks to decide whether
DMA migration is supported for the given src and dst folios.
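
For illustration only, a driver's two callbacks might look roughly like
the sketch below. The mydma_* names, the lockstep walk of the two folio
lists and the mydma_queue_copy()/mydma_wait_copies() engine calls are
assumptions made up for this example; they are not part of this series:

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/migrate_dma.h>

/* Hypothetical sketch of the driver-side callbacks. */
static void mydma_migrate_folios(struct list_head *dst_list,
				 struct list_head *src_list)
{
	struct folio *src, *dst;

	/* Walk src and dst lists in lockstep, queueing one copy per pair. */
	dst = list_first_entry(dst_list, struct folio, lru);
	list_for_each_entry(src, src_list, lru) {
		/* mydma_queue_copy() stands in for the real engine API. */
		mydma_queue_copy(dst, src, folio_size(src));
		dst = list_next_entry(dst, lru);
	}
	/* Wait for all queued DMA copies to finish before returning. */
	mydma_wait_copies();
}

static bool mydma_can_migrate(struct folio *dst, struct folio *src)
{
	/* Only offload copies the DMA engine can actually handle. */
	return folio_size(src) == folio_size(dst);
}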

The DMA driver should also provide a mechanism to call start_offloading()
and stop_offloading() to enable and disable migration offloading,
respectively.
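
Continuing the sketch above (again with hypothetical mydma_* names, and
assuming the driver registers from its module init/exit paths), the
struct migrator and the start/stop calls would be wired up like this:

/* Hypothetical registration of the callbacks sketched above. */
static struct migrator mydma_migrator = {
	.name		 = "mydma",
	.migrate_dma	 = mydma_migrate_folios,
	.can_migrate_dma = mydma_can_migrate,
	.owner		 = THIS_MODULE,
};

static int __init mydma_init(void)
{
	/* Route folio copies during migration through the DMA engine. */
	start_offloading(&mydma_migrator);
	return 0;
}

static void __exit mydma_exit(void)
{
	/* Fall back to the kernel's CPU copy path. */
	stop_offloading();
}

module_init(mydma_init);
module_exit(mydma_exit);
MODULE_LICENSE("GPL");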

Signed-off-by: Mike Day <michael.day@xxxxxxx>
Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
---
 include/linux/migrate_dma.h | 36 ++++++++++++++++++++++++++
 mm/Kconfig                  |  8 ++++++
 mm/Makefile                 |  1 +
 mm/migrate.c                | 40 +++++++++++++++++++++++++++--
 mm/migrate_dma.c            | 51 +++++++++++++++++++++++++++++++++++++
 5 files changed, 134 insertions(+), 2 deletions(-)
 create mode 100644 include/linux/migrate_dma.h
 create mode 100644 mm/migrate_dma.c

diff --git a/include/linux/migrate_dma.h b/include/linux/migrate_dma.h
new file mode 100644
index 000000000000..307b234450c3
--- /dev/null
+++ b/include/linux/migrate_dma.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _MIGRATE_DMA_H
+#define _MIGRATE_DMA_H
+#include <linux/migrate_mode.h>
+
+#define MIGRATOR_NAME_LEN 32
+struct migrator {
+	char name[MIGRATOR_NAME_LEN];
+	void (*migrate_dma)(struct list_head *dst_list, struct list_head *src_list);
+	bool (*can_migrate_dma)(struct folio *dst, struct folio *src);
+	struct rcu_head srcu_head;
+	struct module *owner;
+};
+
+extern struct migrator migrator;
+extern struct mutex migrator_mut;
+extern struct srcu_struct mig_srcu;
+
+#ifdef CONFIG_DMA_MIGRATION
+void srcu_mig_cb(struct rcu_head *head);
+void dma_update_migrator(struct migrator *mig);
+char *get_active_migrator_name(void);
+bool can_dma_migrate(struct folio *dst, struct folio *src);
+void start_offloading(struct migrator *migrator);
+void stop_offloading(void);
+#else
+static inline void srcu_mig_cb(struct rcu_head *head) { }
+static inline void dma_update_migrator(struct migrator *mig) { }
+static inline char *get_active_migrator_name(void) { return NULL; }
+static inline bool can_dma_migrate(struct folio *dst, struct folio *src) { return true; }
+static inline void start_offloading(struct migrator *migrator) { }
+static inline void stop_offloading(void) { }
+#endif /* CONFIG_DMA_MIGRATION */
+
+#endif /* _MIGRATE_DMA_H */
diff --git a/mm/Kconfig b/mm/Kconfig
index ffc3a2ba3a8c..e3ff6583fedb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -662,6 +662,14 @@ config MIGRATION
 config DEVICE_MIGRATION
 	def_bool MIGRATION && ZONE_DEVICE
 
+config DMA_MIGRATION
+	bool "Offload page copy during migration to DMA"
+	default n
+	depends on MIGRATION
+	help
+	  An interface that allows external modules or drivers to offload
+	  the page copy step of page migration.
+
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
 	bool
 
diff --git a/mm/Makefile b/mm/Makefile
index e4b5b75aaec9..1e31fb79d700 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o
 obj-$(CONFIG_MEMTEST)		+= memtest.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_DMA_MIGRATION) += migrate_dma.o
 obj-$(CONFIG_NUMA) += memory-tiers.o
 obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
 obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
diff --git a/mm/migrate.c b/mm/migrate.c
index fce69a494742..db826e3862a1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
 #include <linux/random.h>
 #include <linux/sched/sysctl.h>
 #include <linux/memory-tiers.h>
+#include <linux/migrate_dma.h>
 
 #include <asm/tlbflush.h>
 
@@ -656,6 +657,37 @@ void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
 }
 EXPORT_SYMBOL(folio_migrate_copy);
 
+DEFINE_STATIC_CALL(_folios_copy, folios_copy);
+DEFINE_STATIC_CALL(_can_dma_migrate, can_dma_migrate);
+
+#ifdef CONFIG_DMA_MIGRATION
+void srcu_mig_cb(struct rcu_head *head)
+{
+	static_call_query(_folios_copy);
+}
+
+void dma_update_migrator(struct migrator *mig)
+{
+	int index;
+
+	mutex_lock(&migrator_mut);
+	index = srcu_read_lock(&mig_srcu);
+	strscpy(migrator.name, mig ? mig->name : "kernel", MIGRATOR_NAME_LEN);
+	static_call_update(_folios_copy, mig ? mig->migrate_dma : folios_copy);
+	static_call_update(_can_dma_migrate, mig ? mig->can_migrate_dma : can_dma_migrate);
+	if (READ_ONCE(migrator.owner))
+		module_put(migrator.owner);
+	xchg(&migrator.owner, mig ? mig->owner : NULL);
+	if (READ_ONCE(migrator.owner))
+		try_module_get(migrator.owner);
+	srcu_read_unlock(&mig_srcu, index);
+	mutex_unlock(&migrator_mut);
+	call_srcu(&mig_srcu, &migrator.srcu_head, srcu_mig_cb);
+	srcu_barrier(&mig_srcu);
+}
+
+#endif /* CONFIG_DMA_MIGRATION */
+
 /************************************************************
  *                    Migration functions
  ***********************************************************/
@@ -1686,6 +1718,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru;
 	int is_thp = 0;
+	bool can_migrate = true;
 	struct migrate_folio_info *mig_info, *mig_info2;
 	LIST_HEAD(temp_src_folios);
 	LIST_HEAD(temp_dst_folios);
@@ -1720,7 +1753,10 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
 		 * This does everything except the page copy. The actual page copy
 		 * is handled later in a batch manner.
 		 */
-		if (likely(is_lru)) {
+		can_migrate = static_call(_can_dma_migrate)(dst, folio);
+		if (unlikely(!can_migrate))
+			rc = -EAGAIN;
+		else if (likely(is_lru)) {
 			struct address_space *mapping = folio_mapping(folio);
 
 			if (!mapping)
@@ -1786,7 +1822,7 @@ static void migrate_folios_batch_move(struct list_head *src_folios,
 		goto out;
 
 	/* Batch copy the folios */
-	folios_copy(dst_folios, src_folios);
+	static_call(_folios_copy)(dst_folios, src_folios);
 
 	/*
 	 * Iterate the folio lists to remove migration pte and restore them
diff --git a/mm/migrate_dma.c b/mm/migrate_dma.c
new file mode 100644
index 000000000000..c8b078fdff17
--- /dev/null
+++ b/mm/migrate_dma.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/migrate.h>
+#include <linux/migrate_dma.h>
+#include <linux/rculist.h>
+#include <linux/static_call.h>
+
+atomic_t dispatch_to_dma = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(dispatch_to_dma);
+
+DEFINE_MUTEX(migrator_mut);
+DEFINE_SRCU(mig_srcu);
+
+struct migrator migrator = {
+	.name = "kernel",
+	.migrate_dma = folios_copy,
+	.can_migrate_dma = can_dma_migrate,
+	.srcu_head.func = srcu_mig_cb,
+	.owner = NULL,
+};
+
+bool can_dma_migrate(struct folio *dst, struct folio *src)
+{
+	return true;
+}
+EXPORT_SYMBOL_GPL(can_dma_migrate);
+
+void start_offloading(struct migrator *m)
+{
+	int offloading = 0;
+
+	pr_info("starting migration offload by %s\n", m->name);
+	dma_update_migrator(m);
+	atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 1);
+}
+EXPORT_SYMBOL_GPL(start_offloading);
+
+void stop_offloading(void)
+{
+	int offloading = 1;
+
+	pr_info("stopping migration offload by %s\n", migrator.name);
+	dma_update_migrator(NULL);
+	atomic_try_cmpxchg(&dispatch_to_dma, &offloading, 0);
+}
+EXPORT_SYMBOL_GPL(stop_offloading);
+
+char *get_active_migrator_name(void)
+{
+	return migrator.name;
+}
+EXPORT_SYMBOL_GPL(get_active_migrator_name);
-- 
2.34.1




