With commit efaffc5e40ae ("mm, sched/numa: Remove rate-limiting of
automatic NUMA balancing migration"), we no longer require the migrate
lock and its initialization; it is redundant. Hence remove it.

Signed-off-by: Srikar Dronamraju <srikar@xxxxxxxxxxxxxxxxxx>
---
 include/linux/mmzone.h |  4 ----
 mm/page_alloc.c        | 10 ----------
 2 files changed, 14 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3f4c0b1..d4b0c79 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -668,10 +668,6 @@ typedef struct pglist_data {
 	wait_queue_head_t kcompactd_wait;
 	struct task_struct *kcompactd;
 #endif
-#ifdef CONFIG_NUMA_BALANCING
-	/* Lock serializing the migrate rate limiting window */
-	spinlock_t numabalancing_migrate_lock;
-#endif
 	/*
 	 * This is a per-node reserve of pages that are not available
 	 * to userspace allocations.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 706a738..e2ef1c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6193,15 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static void pgdat_init_numabalancing(struct pglist_data *pgdat)
-{
-	spin_lock_init(&pgdat->numabalancing_migrate_lock);
-}
-#else
-static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
-#endif
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
@@ -6226,7 +6217,6 @@
 static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 {
 	pgdat_resize_init(pgdat);
 
-	pgdat_init_numabalancing(pgdat);
 	pgdat_init_split_queue(pgdat);
 	pgdat_init_kcompactd(pgdat);
-- 
2.7.4