From: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>

Currently, whenever a new task is created, we wait for
sysctl_numa_balancing_scan_delay to avoid unnecessary scanning
overhead. Extend the same logic to new or very short-lived VMAs.

(Raghavendra: Add initialization in vm_area_dup())

Signed-off-by: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Raghavendra K T <raghavendra.kt@xxxxxxx>
---
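Note for reviewers: below is a minimal userspace sketch of the gate this
patch adds to task_numa_work(); it is illustrative only, not kernel
code. fake_vma, fake_jiffies and SCAN_DELAY are hypothetical stand-ins
for vm_area_struct, jiffies and sysctl_numa_balancing_scan_delay, and
the mm->numa_scan_seq check is omitted for brevity. Per-VMA state is
allocated lazily the first time the scanner encounters a VMA, and
scanning is skipped until the delay has expired.

#include <stdio.h>
#include <stdlib.h>

#define SCAN_DELAY 1000	/* stand-in for the scan-delay sysctl, in fake ticks */

struct fake_numab_state {
	unsigned long next_scan;
};

struct fake_vma {
	struct fake_numab_state *numab_state;
};

static unsigned long fake_jiffies;	/* stand-in for the jiffies counter */

/* Allocate per-VMA state on first sight; allow scanning once the delay expires. */
static int vma_scan_allowed(struct fake_vma *vma)
{
	if (!vma->numab_state) {
		vma->numab_state = calloc(1, sizeof(*vma->numab_state));
		if (!vma->numab_state)
			return 0;	/* allocation failed; retry on a later pass */
		vma->numab_state->next_scan = fake_jiffies + SCAN_DELAY;
	}
	return fake_jiffies >= vma->numab_state->next_scan;
}

int main(void)
{
	struct fake_vma vma = { NULL };

	/* Prints scan=0 at t=0 and t=500, scan=1 from t=1000 onwards. */
	for (fake_jiffies = 0; fake_jiffies <= 2000; fake_jiffies += 500)
		printf("t=%lu scan=%d\n", fake_jiffies, vma_scan_allowed(&vma));

	free(vma.numab_state);
	return 0;
}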
 include/linux/mm.h       | 16 ++++++++++++++++
 include/linux/mm_types.h |  7 +++++++
 kernel/fork.c            |  2 ++
 kernel/sched/fair.c      | 19 +++++++++++++++++++
 4 files changed, 44 insertions(+)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 974ccca609d2..41cc8997d4e5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -29,6 +29,7 @@
 #include <linux/pgtable.h>
 #include <linux/kasan.h>
 #include <linux/memremap.h>
+#include <linux/slab.h>

 struct mempolicy;
 struct anon_vma;
@@ -611,6 +612,20 @@ struct vm_operations_struct {
 					  unsigned long addr);
 };

+#ifdef CONFIG_NUMA_BALANCING
+static inline void vma_numab_state_init(struct vm_area_struct *vma)
+{
+	vma->numab_state = NULL;
+}
+static inline void vma_numab_state_free(struct vm_area_struct *vma)
+{
+	kfree(vma->numab_state);
+}
+#else
+static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 {
 	static const struct vm_operations_struct dummy_vm_ops = {};
@@ -619,6 +634,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 	vma->vm_mm = mm;
 	vma->vm_ops = &dummy_vm_ops;
 	INIT_LIST_HEAD(&vma->anon_vma_chain);
+	vma_numab_state_init(vma);
 }

 static inline void vma_set_anonymous(struct vm_area_struct *vma)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 500e536796ca..a4a1093870d3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -435,6 +435,10 @@ struct anon_vma_name {
 	char name[];
 };

+struct vma_numab_state {
+	unsigned long next_scan;
+};
+
 /*
  * This struct describes a virtual memory area. There is one of these
  * per VM-area/task. A VM area is any part of the process virtual memory
@@ -504,6 +508,9 @@ struct vm_area_struct {
 #endif
 #ifdef CONFIG_NUMA
 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+	struct vma_numab_state *numab_state;	/* NUMA Balancing state */
 #endif
 	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 } __randomize_layout;
diff --git a/kernel/fork.c b/kernel/fork.c
index 08969f5aa38d..6c19a3305990 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -474,6 +474,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 		 */
 		*new = data_race(*orig);
 		INIT_LIST_HEAD(&new->anon_vma_chain);
+		vma_numab_state_init(new);
 		dup_anon_vma_name(orig, new);
 	}
 	return new;
@@ -481,6 +482,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)

 void vm_area_free(struct vm_area_struct *vma)
 {
+	vma_numab_state_free(vma);
 	free_anon_vma_name(vma);
 	kmem_cache_free(vm_area_cachep, vma);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e4a0b8bd941c..e39c36e71cec 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3015,6 +3015,25 @@ static void task_numa_work(struct callback_head *work)
 		if (!vma_is_accessible(vma))
 			continue;

+		/* Initialise new per-VMA NUMAB state. */
+		if (!vma->numab_state) {
+			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
+				GFP_KERNEL);
+			if (!vma->numab_state)
+				continue;
+
+			vma->numab_state->next_scan = now +
+				msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
+		}
+
+		/*
+		 * Scanning the VMAs of short-lived tasks adds more overhead, so
+		 * delay the scan for new VMAs.
+		 */
+		if (mm->numa_scan_seq && time_before(jiffies,
+						vma->numab_state->next_scan))
+			continue;
+
 		do {
 			start = max(start, vma->vm_start);
 			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
-- 
2.34.1
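(Postscript, not part of the patch: a second userspace sketch, again
with hypothetical fake_* stand-ins rather than kernel API, of why
vm_area_dup() must reset numab_state after the structure copy. Without
the vma_numab_state_init() step, the duplicate would inherit the
original's pointer and freeing both VMAs would free the same allocation
twice.)

#include <stdlib.h>

struct fake_numab_state {
	unsigned long next_scan;
};

struct fake_vma {
	struct fake_numab_state *numab_state;
	/* ... other fields elided ... */
};

static struct fake_vma *fake_vma_dup(const struct fake_vma *orig)
{
	struct fake_vma *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	*new = *orig;			/* copies the numab_state pointer too */
	new->numab_state = NULL;	/* the vma_numab_state_init(new) step */
	return new;
}

static void fake_vma_free(struct fake_vma *vma)
{
	free(vma->numab_state);		/* the vma_numab_state_free() step */
	free(vma);
}

int main(void)
{
	struct fake_vma a = { calloc(1, sizeof(struct fake_numab_state)) };
	struct fake_vma *b = fake_vma_dup(&a);

	/* Safe: b frees its own (NULL) state; a's allocation is freed once. */
	fake_vma_free(b);
	free(a.numab_state);
	return 0;
}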