The following functions/tracepoints are shared between khugepaged and
madvise collapse contexts.  Remove the khugepaged prefixes.

tracepoint:mm_khugepaged_scan_pmd -> tracepoint:mm_scan_pmd
khugepaged_test_exit() -> test_exit()
khugepaged_scan_abort() -> scan_abort()
khugepaged_scan_pmd() -> scan_pmd()
khugepaged_find_target_node() -> find_target_node()

Signed-off-by: Zach O'Keefe <zokeefe@xxxxxxxxxx>
---
 include/trace/events/huge_memory.h |  2 +-
 mm/khugepaged.c                    | 68 ++++++++++++++----------------
 2 files changed, 33 insertions(+), 37 deletions(-)

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 9faa678e0a5b..09be0e2f76b1 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -48,7 +48,7 @@ SCAN_STATUS
 #define EM(a, b)	{a, b},
 #define EMe(a, b)	{a, b}
 
-TRACE_EVENT(mm_khugepaged_scan_pmd,
+TRACE_EVENT(mm_scan_pmd,
 
 	TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
 		 int referenced, int none_or_zero, int status, int unmapped),
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c5c484b7e394..2717262d1832 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -90,7 +90,7 @@ struct collapse_control {
 	/* Num pages scanned per node */
 	int node_load[MAX_NUMNODES];
 
-	/* Last target selected in khugepaged_find_target_node() for this scan */
+	/* Last target selected in find_target_node() for this scan */
 	int last_target_node;
 
 	struct page *hpage;
@@ -453,7 +453,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
 	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
-static inline int khugepaged_test_exit(struct mm_struct *mm)
+static inline int test_exit(struct mm_struct *mm)
 {
 	return atomic_read(&mm->mm_users) == 0;
 }
@@ -505,7 +505,7 @@ void __khugepaged_enter(struct mm_struct *mm)
 		return;
 
 	/* __khugepaged_exit() must not run from under us */
-	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+	VM_BUG_ON_MM(test_exit(mm), mm);
 	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
 		free_mm_slot(mm_slot);
 		return;
@@ -557,12 +557,11 @@ void __khugepaged_exit(struct mm_struct *mm)
 		mmdrop(mm);
 	} else if (mm_slot) {
 		/*
-		 * This is required to serialize against
-		 * khugepaged_test_exit() (which is guaranteed to run
-		 * under mmap sem read mode). Stop here (after we
-		 * return all pagetables will be destroyed) until
-		 * khugepaged has finished working on the pagetables
-		 * under the mmap_lock.
+		 * This is required to serialize against test_exit() (which is
+		 * guaranteed to run under mmap sem read mode). Stop here
+		 * (after we return all pagetables will be destroyed) until
+		 * khugepaged has finished working on the pagetables under
+		 * the mmap_lock.
 		 */
 		mmap_write_lock(mm);
 		mmap_write_unlock(mm);
@@ -816,7 +815,7 @@ static void khugepaged_alloc_sleep(void)
 	remove_wait_queue(&khugepaged_wait, &wait);
 }
 
-static bool khugepaged_scan_abort(int nid, struct collapse_control *cc)
+static bool scan_abort(int nid, struct collapse_control *cc)
 {
 	int i;
 
@@ -846,7 +845,7 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
 	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
 }
 
-static int khugepaged_find_target_node(struct collapse_control *cc)
+static int find_target_node(struct collapse_control *cc)
 {
 	int nid, target_node = 0, max_value = 0;
 
@@ -993,7 +992,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 	struct vm_area_struct *vma;
 	unsigned long hstart, hend;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(test_exit(mm)))
 		return SCAN_ANY_PROCESS;
 
 	*vmap = vma = find_vma(mm, address);
@@ -1037,7 +1036,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
 
 /*
  * Bring missing pages in from swap, to complete THP collapse.
- * Only done if khugepaged_scan_pmd believes it is worthwhile.
+ * Only done if scan_pmd believes it is worthwhile.
  *
  * Called and returns without pte mapped or spinlocks held,
  * but with mmap_lock held to protect against vma changes.
@@ -1129,7 +1128,7 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	mmap_read_unlock(mm);
 	cr->dropped_mmap_lock = true;
 
-	node = khugepaged_find_target_node(cc);
+	node = find_target_node(cc);
 	/* sched to specified node before huage page memory copy */
 	if (task_node(current) != node) {
 		cpumask = cpumask_of_node(node);
@@ -1270,11 +1269,9 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	return;
 }
 
-static void khugepaged_scan_pmd(struct mm_struct *mm,
-				struct vm_area_struct *vma,
-				unsigned long address,
-				struct collapse_control *cc,
-				struct collapse_result *cr)
+static void scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+		     unsigned long address, struct collapse_control *cc,
+		     struct collapse_result *cr)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
@@ -1364,7 +1361,7 @@ static void khugepaged_scan_pmd(struct mm_struct *mm,
 		 * hit record.
 		 */
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (scan_abort(node, cc)) {
 			cr->result = SCAN_SCAN_ABORT;
 			goto out_unmap;
 		}
@@ -1421,8 +1418,8 @@ static void khugepaged_scan_pmd(struct mm_struct *mm,
 		/* collapse_huge_page will return with the mmap_lock released */
 		collapse_huge_page(mm, address, cc, referenced, unmapped, cr);
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
-				     none_or_zero, cr->result, unmapped);
+	trace_mm_scan_pmd(mm, page, writable, referenced, none_or_zero,
+			  cr->result, unmapped);
 }
 
 static void collect_mm_slot(struct mm_slot *mm_slot)
@@ -1431,7 +1428,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
 	lockdep_assert_held(&khugepaged_mm_lock);
 
-	if (khugepaged_test_exit(mm)) {
+	if (test_exit(mm)) {
 		/* free mm_slot */
 		hash_del(&mm_slot->hash);
 		list_del(&mm_slot->mm_node);
@@ -1598,7 +1595,7 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
 	if (!mmap_write_trylock(mm))
 		return;
 
-	if (unlikely(khugepaged_test_exit(mm)))
+	if (unlikely(test_exit(mm)))
 		goto out;
 
 	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
@@ -1653,7 +1650,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 * reverse order. Trylock is a way to avoid deadlock.
 		 */
 		if (mmap_write_trylock(mm)) {
-			if (!khugepaged_test_exit(mm))
+			if (!test_exit(mm))
 				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
@@ -1710,7 +1707,7 @@ static void collapse_file(struct mm_struct *mm,
 
 	/* Only allocate from the target node */
 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-	node = khugepaged_find_target_node(cc);
+	node = find_target_node(cc);
 	new_page = cc->alloc_hpage(cc, gfp, node);
 
 	if (!new_page) {
@@ -2094,7 +2091,7 @@ static void khugepaged_scan_file(struct mm_struct *mm,
 		}
 
 		node = page_to_nid(page);
-		if (khugepaged_scan_abort(node, cc)) {
+		if (scan_abort(node, cc)) {
 			cr->result = SCAN_SCAN_ABORT;
 			break;
 		}
@@ -2183,7 +2180,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	vma = NULL;
 	if (unlikely(!mmap_read_trylock(mm)))
 		goto breakouterloop_mmap_lock;
-	if (likely(!khugepaged_test_exit(mm)))
+	if (likely(!test_exit(mm)))
 		vma = find_vma(mm, khugepaged_scan.address);
 
 	progress++;
@@ -2191,7 +2188,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 		unsigned long hstart, hend;
 
 		cond_resched();
-		if (unlikely(khugepaged_test_exit(mm))) {
+		if (unlikely(test_exit(mm))) {
 			progress++;
 			break;
 		}
@@ -2215,7 +2212,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 		while (khugepaged_scan.address < hend) {
 			struct collapse_result cr = {0};
 			cond_resched();
-			if (unlikely(khugepaged_test_exit(mm)))
+			if (unlikely(test_exit(mm)))
 				goto breakouterloop;
 
 			VM_BUG_ON(khugepaged_scan.address < hstart ||
@@ -2231,9 +2228,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 				khugepaged_scan_file(mm, file, pgoff, cc, &cr);
 				fput(file);
 			} else {
-				khugepaged_scan_pmd(mm, vma,
-						    khugepaged_scan.address,
-						    cc, &cr);
+				scan_pmd(mm, vma, khugepaged_scan.address, cc,
+					 &cr);
 			}
 			if (cr.result == SCAN_SUCCEED)
 				++khugepaged_pages_collapsed;
@@ -2257,7 +2253,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
 	 */
-	if (khugepaged_test_exit(mm) || !vma) {
+	if (test_exit(mm) || !vma) {
 		/*
 		 * Make sure that if mm_users is reaching zero while
 		 * khugepaged runs here, khugepaged_exit will find
@@ -2528,11 +2524,11 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		cond_resched();
 		memset(&cr, 0, sizeof(cr));
 
-		if (unlikely(khugepaged_test_exit(mm)))
+		if (unlikely(test_exit(mm)))
 			break;
 
 		memset(cc.node_load, 0, sizeof(cc.node_load));
-		khugepaged_scan_pmd(mm, vma, addr, &cc, &cr);
+		scan_pmd(mm, vma, addr, &cc, &cr);
 		if (cr.dropped_mmap_lock)
 			*prev = NULL;  /* tell madvise we dropped mmap_lock */
 
-- 
2.35.1.1178.g4f1659d476-goog