Since we already have the list of mm_structs in the system, add a module
to scan each mm: walk the VMAs of each mm_struct and scan all the pages
associated with it.

In the scan path: check for recently accessed pages (folios) that belong
to slow-tier nodes, and add all those folios to a migration list.

Signed-off-by: Raghavendra K T <raghavendra.kt@xxxxxxx>
---
 mm/kmmscand.c | 323 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 321 insertions(+), 2 deletions(-)

diff --git a/mm/kmmscand.c b/mm/kmmscand.c
index 36d0fea31dea..a76a58bf37b2 100644
--- a/mm/kmmscand.c
+++ b/mm/kmmscand.c
@@ -4,10 +4,18 @@
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
 #include <linux/mmu_notifier.h>
+#include <linux/rmap.h>
+#include <linux/pagewalk.h>
+#include <linux/page_ext.h>
+#include <linux/page_idle.h>
+#include <linux/page_table_check.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/mm_inline.h>
 #include <linux/kthread.h>
 #include <linux/kmmscand.h>
+#include <linux/memory-tiers.h>
+#include <linux/mempolicy.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/cleanup.h>
@@ -18,6 +26,11 @@
 
 static struct task_struct *kmmscand_thread __read_mostly;
 static DEFINE_MUTEX(kmmscand_mutex);
+/*
+ * Total VMA size to cover during scan.
+ */
+#define KMMSCAND_SCAN_SIZE (1 * 1024 * 1024 * 1024UL)
+static unsigned long kmmscand_scan_size __read_mostly = KMMSCAND_SCAN_SIZE;
 
 /* How long to pause between two scan and migration cycle */
 static unsigned int kmmscand_scan_sleep_ms __read_mostly = 16;
@@ -39,10 +52,14 @@ static DEFINE_READ_MOSTLY_HASHTABLE(kmmscand_slots_hash, KMMSCAND_SLOT_HASH_BITS
 
 static struct kmem_cache *kmmscand_slot_cache __read_mostly;
 
+/* Per mm information collected to control VMA scanning */
 struct kmmscand_mm_slot {
 	struct mm_slot slot;
+	long address;
+	bool is_scanned;
 };
 
+/* Data structure to keep track of current mm under scan */
 struct kmmscand_scan {
 	struct list_head mm_head;
 	struct kmmscand_mm_slot *mm_slot;
@@ -52,6 +69,33 @@ struct kmmscand_scan kmmscand_scan = {
 	.mm_head = LIST_HEAD_INIT(kmmscand_scan.mm_head),
 };
 
+/*
+ * Data structure passed to control scanning and also collect
+ * per memory node information
+ */
+struct kmmscand_scanctrl {
+	struct list_head scan_list;
+	unsigned long address;
+};
+
+struct kmmscand_scanctrl kmmscand_scanctrl;
+
+/* Per folio information used for migration */
+struct kmmscand_migrate_info {
+	struct list_head migrate_node;
+	struct mm_struct *mm;
+	struct folio *folio;
+	unsigned long address;
+};
+
+static bool kmmscand_eligible_srcnid(int nid)
+{
+	if (!node_is_toptier(nid))
+		return true;
+
+	return false;
+}
+
 static int kmmscand_has_work(void)
 {
 	return !list_empty(&kmmscand_scan.mm_head);
@@ -82,15 +126,277 @@ static void kmmscand_wait_work(void)
 
 	return;
 }
+
+static inline bool is_valid_folio(struct folio *folio)
+{
+	if (!folio || folio_test_unevictable(folio) || !folio_mapped(folio) ||
+	    folio_is_zone_device(folio) || folio_likely_mapped_shared(folio))
+		return false;
+
+	return true;
+}
+
+static bool folio_idle_clear_pte_refs_one(struct folio *folio,
+					struct vm_area_struct *vma,
+					unsigned long addr,
+					pte_t *ptep)
+{
+	bool referenced = false;
+	struct mm_struct *mm = vma->vm_mm;
+	pmd_t *pmd = pmd_off(mm, addr);
+
+	if (ptep) {
+		if (ptep_clear_young_notify(vma, addr, ptep))
+			referenced = true;
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		if (!pmd_present(*pmd))
+			WARN_ON_ONCE(1);
+		if (pmdp_clear_young_notify(vma, addr, pmd))
+			referenced = true;
+	} else {
+		WARN_ON_ONCE(1);
+	}
+
+	if (referenced) {
+		folio_clear_idle(folio);
+		folio_set_young(folio);
+	}
+
+	return true;
+}
+
+static void page_idle_clear_pte_refs(struct page *page, pte_t *pte, struct mm_walk *walk)
+{
+	bool need_lock;
+	struct folio *folio = page_folio(page);
+	unsigned long address;
+
+	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
+		return;
+
+	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
+	if (need_lock && !folio_trylock(folio))
+		return;
+	address = vma_address(walk->vma, page_pgoff(folio, page), compound_nr(page));
+	VM_BUG_ON_VMA(address == -EFAULT, walk->vma);
+	folio_idle_clear_pte_refs_one(folio, walk->vma, address, pte);
+
+	if (need_lock)
+		folio_unlock(folio);
+}
+
+static int hot_vma_idle_pte_entry(pte_t *pte,
+				unsigned long addr,
+				unsigned long next,
+				struct mm_walk *walk)
+{
+	struct page *page;
+	struct folio *folio;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	struct kmmscand_migrate_info *info;
+	struct kmmscand_scanctrl *scanctrl = walk->private;
+	int srcnid;
+
+	scanctrl->address = addr;
+	pte_t pteval = ptep_get(pte);
+
+	if (!pte_present(pteval))
+		return 0;
+
+	if (pte_none(pteval))
+		return 0;
+
+	vma = walk->vma;
+	mm = vma->vm_mm;
+
+	page = pte_page(*pte);
+
+	page_idle_clear_pte_refs(page, pte, walk);
+
+	folio = page_folio(page);
+	folio_get(folio);
+
+	if (!is_valid_folio(folio)) {
+		folio_put(folio);
+		return 0;
+	}
+	srcnid = folio_nid(folio);
+
+
+	if (!folio_test_lru(folio)) {
+		folio_put(folio);
+		return 0;
+	}
+
+	if (!folio_test_idle(folio) || folio_test_young(folio) ||
+		mmu_notifier_test_young(mm, addr) ||
+		folio_test_referenced(folio) || pte_young(pteval)) {
+
+		/* Do not try to promote pages from regular nodes */
+		if (!kmmscand_eligible_srcnid(srcnid)) {
+			folio_put(folio);
+			return 0;
+		}
+		/* XXX: Leaking memory. TBD: consume info */
+		info = kzalloc(sizeof(struct kmmscand_migrate_info), GFP_NOWAIT);
+		if (info && scanctrl) {
+
+			info->mm = mm;
+			info->address = addr;
+			info->folio = folio;
+
+			/* No need of lock now */
+			list_add_tail(&info->migrate_node, &scanctrl->scan_list);
+		}
+	}
+
+	folio_set_idle(folio);
+	folio_put(folio);
+	return 0;
+}
+
+static const struct mm_walk_ops hot_vma_set_idle_ops = {
+	.pte_entry = hot_vma_idle_pte_entry,
+	.walk_lock = PGWALK_RDLOCK,
+};
+
+static void kmmscand_walk_page_vma(struct vm_area_struct *vma, struct kmmscand_scanctrl *scanctrl)
+{
+	if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
+		is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
+		return;
+	}
+	if (!vma->vm_mm ||
+		(vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
+		return;
+
+	if (!vma_is_accessible(vma))
+		return;
+
+	walk_page_vma(vma, &hot_vma_set_idle_ops, scanctrl);
+}
+
 static inline int kmmscand_test_exit(struct mm_struct *mm)
 {
 	return atomic_read(&mm->mm_users) == 0;
 }
 
+static void kmmscand_collect_mm_slot(struct kmmscand_mm_slot *mm_slot)
+{
+	struct mm_slot *slot = &mm_slot->slot;
+	struct mm_struct *mm = slot->mm;
+
+	lockdep_assert_held(&kmmscand_mm_lock);
+
+	if (kmmscand_test_exit(mm)) {
+		/* free mm_slot */
+		hash_del(&slot->hash);
+		list_del(&slot->mm_node);
+
+		mm_slot_free(kmmscand_slot_cache, mm_slot);
+		mmdrop(mm);
+	}
+}
+
 static unsigned long kmmscand_scan_mm_slot(void)
 {
-	/* placeholder for scanning */
-	msleep(100);
+	bool next_mm = false;
+	bool update_mmslot_info = false;
+
+	unsigned long vma_scanned_size = 0;
+	unsigned long address;
+
+	struct mm_slot *slot;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma = NULL;
+	struct kmmscand_mm_slot *mm_slot;
+
+	/* Retrieve mm */
+	spin_lock(&kmmscand_mm_lock);
+
+	if (kmmscand_scan.mm_slot) {
+		mm_slot = kmmscand_scan.mm_slot;
+		slot = &mm_slot->slot;
+		address = mm_slot->address;
+	} else {
+		slot = list_entry(kmmscand_scan.mm_head.next,
+				struct mm_slot, mm_node);
+		mm_slot = mm_slot_entry(slot, struct kmmscand_mm_slot, slot);
+		address = mm_slot->address;
+		kmmscand_scan.mm_slot = mm_slot;
+	}
+
+	mm = slot->mm;
+	mm_slot->is_scanned = true;
+	spin_unlock(&kmmscand_mm_lock);
+
+	if (unlikely(!mmap_read_trylock(mm)))
+		goto outerloop_mmap_lock;
+
+	if (unlikely(kmmscand_test_exit(mm))) {
+		next_mm = true;
+		goto outerloop;
+	}
+
+	VMA_ITERATOR(vmi, mm, address);
+
+	for_each_vma(vmi, vma) {
+		kmmscand_walk_page_vma(vma, &kmmscand_scanctrl);
+		vma_scanned_size += vma->vm_end - vma->vm_start;
+
+		if (vma_scanned_size >= kmmscand_scan_size) {
+			next_mm = true;
+			/* TBD: Add scanned folios to migration list */
+			break;
+		}
+	}
+
+	if (!vma)
+		address = 0;
+	else
+		address = kmmscand_scanctrl.address + PAGE_SIZE;
+
+	update_mmslot_info = true;
+
+	if (update_mmslot_info)
+		mm_slot->address = address;
+
+outerloop:
+	/* exit_mmap will destroy ptes after this */
+	mmap_read_unlock(mm);
+
+outerloop_mmap_lock:
+	spin_lock(&kmmscand_mm_lock);
+	WARN_ON(kmmscand_scan.mm_slot != mm_slot);
+
+	/*
+	 * Release the current mm_slot if this mm is about to die, or
+	 * if we scanned all vmas of this mm.
+	 */
+	if (unlikely(kmmscand_test_exit(mm)) || !vma || next_mm) {
+		/*
+		 * Make sure that if mm_users is reaching zero while
+		 * kmmscand runs here, kmmscand_exit will find
+		 * mm_slot not pointing to the exiting mm.
+		 */
+		if (slot->mm_node.next != &kmmscand_scan.mm_head) {
+			slot = list_entry(slot->mm_node.next,
+					struct mm_slot, mm_node);
+			kmmscand_scan.mm_slot =
+				mm_slot_entry(slot, struct kmmscand_mm_slot, slot);
+
+		} else
+			kmmscand_scan.mm_slot = NULL;
+
+		if (kmmscand_test_exit(mm)) {
+			kmmscand_collect_mm_slot(mm_slot);
+			goto end;
+		}
+	}
+	mm_slot->is_scanned = false;
+end:
+	spin_unlock(&kmmscand_mm_lock);
 	return 0;
 }
 
@@ -153,6 +459,7 @@ void __kmmscand_enter(struct mm_struct *mm)
 	if (!kmmscand_slot)
 		return;
 
+	kmmscand_slot->address = 0;
 	slot = &kmmscand_slot->slot;
 
 	spin_lock(&kmmscand_mm_lock);
@@ -180,6 +487,12 @@ void __kmmscand_exit(struct mm_struct *mm)
 		hash_del(&slot->hash);
 		list_del(&slot->mm_node);
 		free = 1;
+	} else if (mm_slot && kmmscand_scan.mm_slot == mm_slot && !mm_slot->is_scanned) {
+		hash_del(&slot->hash);
+		list_del(&slot->mm_node);
+		free = 1;
+		/* TBD: Set the actual next slot */
+		kmmscand_scan.mm_slot = NULL;
 	}
 
 	spin_unlock(&kmmscand_mm_lock);
@@ -233,6 +546,11 @@ static int stop_kmmscand(void)
 
 	return err;
 }
+static void init_list(void)
+{
+	INIT_LIST_HEAD(&kmmscand_scanctrl.scan_list);
+	init_waitqueue_head(&kmmscand_wait);
+}
 
 static int __init kmmscand_init(void)
 {
@@ -245,6 +563,7 @@ static int __init kmmscand_init(void)
 		return -ENOMEM;
 	}
 
+	init_list();
 	err = start_kmmscand();
 	if (err)
 		goto err_kmmscand;
-- 
2.34.1
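A note on the "XXX: Leaking memory. TBD: consume info" path in
hot_vma_idle_pte_entry(): the kmmscand_migrate_info entries are only queued
on scanctrl->scan_list here, and nothing dequeues or frees them yet. As a
rough illustration only (not part of this patch), a follow-up consumer could
drain the list along these lines; the helper name kmmscand_drain_scan_list()
and the migration step mentioned in the comment are assumptions about what a
later patch in the series might do:

static void kmmscand_drain_scan_list(struct kmmscand_scanctrl *scanctrl)
{
	struct kmmscand_migrate_info *info, *tmp;

	/* Walk the collected entries; the _safe variant allows deletion while iterating. */
	list_for_each_entry_safe(info, tmp, &scanctrl->scan_list, migrate_node) {
		/*
		 * A real consumer would try to promote info->folio to a
		 * top-tier node here (e.g. via migrate_misplaced_folio())
		 * before releasing the entry.
		 */
		list_del(&info->migrate_node);
		kfree(info);
	}
}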