On Tue 07-04-20 15:54:16, David Hildenbrand wrote:
> Fortunately, all users of is_mem_section_removable() are gone. Get rid of
> it, including some now unnecessary functions.
> 
> Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
> Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> Cc: Oscar Salvador <osalvador@xxxxxxx>
> Cc: Baoquan He <bhe@xxxxxxxxxx>
> Cc: Wei Yang <richard.weiyang@xxxxxxxxx>
> Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>

Acked-by: Michal Hocko <mhocko@xxxxxxxx>

> ---
>  include/linux/memory_hotplug.h |  7 ----
>  mm/memory_hotplug.c            | 75 ----------------------------------

\o/ Thanks!

>  2 files changed, 82 deletions(-)
> 
> diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
> index 93d9ada74ddd..7dca9cd6076b 100644
> --- a/include/linux/memory_hotplug.h
> +++ b/include/linux/memory_hotplug.h
> @@ -314,19 +314,12 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
>  
>  #ifdef CONFIG_MEMORY_HOTREMOVE
>  
> -extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
>  extern void try_offline_node(int nid);
>  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
>  extern int remove_memory(int nid, u64 start, u64 size);
>  extern void __remove_memory(int nid, u64 start, u64 size);
>  
>  #else
> -static inline bool is_mem_section_removable(unsigned long pfn,
> -		unsigned long nr_pages)
> -{
> -	return false;
> -}
> -
>  static inline void try_offline_node(int nid) {}
>  
>  static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 47cf6036eb31..4d338d546d52 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1112,81 +1112,6 @@ int add_memory(int nid, u64 start, u64 size)
>  EXPORT_SYMBOL_GPL(add_memory);
>  
>  #ifdef CONFIG_MEMORY_HOTREMOVE
> -/*
> - * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
> - * set and the size of the free page is given by page_order(). Using this,
> - * the function determines if the pageblock contains only free pages.
> - * Due to buddy contraints, a free page at least the size of a pageblock will
> - * be located at the start of the pageblock
> - */
> -static inline int pageblock_free(struct page *page)
> -{
> -	return PageBuddy(page) && page_order(page) >= pageblock_order;
> -}
> -
> -/* Return the pfn of the start of the next active pageblock after a given pfn */
> -static unsigned long next_active_pageblock(unsigned long pfn)
> -{
> -	struct page *page = pfn_to_page(pfn);
> -
> -	/* Ensure the starting page is pageblock-aligned */
> -	BUG_ON(pfn & (pageblock_nr_pages - 1));
> -
> -	/* If the entire pageblock is free, move to the end of free page */
> -	if (pageblock_free(page)) {
> -		int order;
> -		/* be careful. we don't have locks, page_order can be changed.*/
> -		order = page_order(page);
> -		if ((order < MAX_ORDER) && (order >= pageblock_order))
> -			return pfn + (1 << order);
> -	}
> -
> -	return pfn + pageblock_nr_pages;
> -}
> -
> -static bool is_pageblock_removable_nolock(unsigned long pfn)
> -{
> -	struct page *page = pfn_to_page(pfn);
> -	struct zone *zone;
> -
> -	/*
> -	 * We have to be careful here because we are iterating over memory
> -	 * sections which are not zone aware so we might end up outside of
> -	 * the zone but still within the section.
> -	 * We have to take care about the node as well. If the node is offline
> -	 * its NODE_DATA will be NULL - see page_zone.
> -	 */
> -	if (!node_online(page_to_nid(page)))
> -		return false;
> -
> -	zone = page_zone(page);
> -	pfn = page_to_pfn(page);
> -	if (!zone_spans_pfn(zone, pfn))
> -		return false;
> -
> -	return !has_unmovable_pages(zone, page, MIGRATE_MOVABLE,
> -				    MEMORY_OFFLINE);
> -}
> -
> -/* Checks if this range of memory is likely to be hot-removable. */
> -bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
> -{
> -	unsigned long end_pfn, pfn;
> -
> -	end_pfn = min(start_pfn + nr_pages,
> -		      zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
> -
> -	/* Check the starting page of each pageblock within the range */
> -	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
> -		if (!is_pageblock_removable_nolock(pfn))
> -			return false;
> -		cond_resched();
> -	}
> -
> -	/* All pageblocks in the memory block are likely to be hot-removable */
> -	return true;
> -}
> -
>  /*
>   * Confirm all pages in a range [start, end) belong to the same zone (skipping
>   * memory holes). When true, return the zone.
> -- 
> 2.25.1
> 

-- 
Michal Hocko
SUSE Labs