From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>

This commit introduces the alloc_contig_freed_pages() function, which
allocates (i.e. removes from the buddy system) free pages in a given
range.  The caller has to guarantee that all pages in the range are in
the buddy system.

Along with this function, a free_contig_pages() function is provided,
which frees all (or a subset of the) pages allocated with
alloc_contig_freed_pages().

Michal Nazarewicz has modified the function to make it easier to
allocate ranges that are not MAX_ORDER_NR_PAGES aligned by making it
return the pfn one past the last allocated page.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Signed-off-by: Michal Nazarewicz <m.nazarewicz@xxxxxxxxxxx>
[m.nazarewicz: added checks that all allocated pages come from the same
memory zone]
Signed-off-by: Michal Nazarewicz <mina86@xxxxxxxxxx>
Signed-off-by: Kyungmin Park <kyungmin.park@xxxxxxxxxxx>
[m.szyprowski: fixed wrong condition in VM_BUG_ON assert]
Signed-off-by: Marek Szyprowski <m.szyprowski@xxxxxxxxxxx>
Acked-by: Arnd Bergmann <arnd@xxxxxxxx>
---
 include/linux/mmzone.h         |   16 +++++++
 include/linux/page-isolation.h |    5 +++
 mm/page_alloc.c                |   67 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 88 insertions(+), 0 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a2760bb..862a834 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1168,6 +1168,22 @@ static inline int memmap_valid_within(unsigned long pfn,
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/*
+ * Both PFNs must be from the same zone!  If this function returns
+ * true, pfn_to_page(pfn1) + (pfn2 - pfn1) == pfn_to_page(pfn2).
+ */
+static inline bool zone_pfn_same_memmap(unsigned long pfn1, unsigned long pfn2)
+{
+	return pfn_to_section_nr(pfn1) == pfn_to_section_nr(pfn2);
+}
+
+#else
+
+#define zone_pfn_same_memmap(pfn1, pfn2) (true)
+
+#endif
+
 #endif /* !__GENERATING_BOUNDS.H */
 #endif /* !__ASSEMBLY__ */
 #endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 58cdbac..b9fc428 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,6 +33,11 @@ test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
 extern int set_migratetype_isolate(struct page *page);
 extern void unset_migratetype_isolate(struct page *page);
 
+/* The below functions must be run on a range from a single zone. */
+extern unsigned long alloc_contig_freed_pages(unsigned long start,
+					      unsigned long end, gfp_t flag);
+extern void free_contig_pages(unsigned long pfn, unsigned nr_pages);
+
 /*
  * For migration.
  */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bf4399a..fbfb920 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5706,6 +5706,73 @@ out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 
+unsigned long alloc_contig_freed_pages(unsigned long start, unsigned long end,
+				       gfp_t flag)
+{
+	unsigned long pfn = start, count;
+	struct page *page;
+	struct zone *zone;
+	int order;
+
+	VM_BUG_ON(!pfn_valid(start));
+	page = pfn_to_page(start);
+	zone = page_zone(page);
+
+	spin_lock_irq(&zone->lock);
+
+	for (;;) {
+		VM_BUG_ON(page_count(page) || !PageBuddy(page) ||
+			  page_zone(page) != zone);
+
+		list_del(&page->lru);
+		order = page_order(page);
+		count = 1UL << order;
+		zone->free_area[order].nr_free--;
+		rmv_page_order(page);
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(long)count);
+
+		pfn += count;
+		if (pfn >= end)
+			break;
+		VM_BUG_ON(!pfn_valid(pfn));
+
+		if (zone_pfn_same_memmap(pfn - count, pfn))
+			page += count;
+		else
+			page = pfn_to_page(pfn);
+	}
+
+	spin_unlock_irq(&zone->lock);
+
+	/* After this, pages in the range can be freed one by one */
+	count = pfn - start;
+	pfn = start;
+	for (page = pfn_to_page(pfn); count; --count) {
+		prep_new_page(page, 0, flag);
+		++pfn;
+		if (likely(zone_pfn_same_memmap(pfn - 1, pfn)))
+			++page;
+		else
+			page = pfn_to_page(pfn);
+	}
+
+	return pfn;
+}
+
+void free_contig_pages(unsigned long pfn, unsigned nr_pages)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	while (nr_pages--) {
+		__free_page(page);
+		++pfn;
+		if (likely(zone_pfn_same_memmap(pfn - 1, pfn)))
+			++page;
+		else
+			page = pfn_to_page(pfn);
+	}
+}
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 /*
  * All pages in the range must be isolated before calling this.
-- 
1.7.1.569.g6f426
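
For illustration, a minimal sketch of how a caller might drive the two
new functions.  This is hypothetical and not part of the patch: the
helper name grab_contig_range() is invented, and it assumes a
pageblock-aligned range [start, end) within a single zone whose in-use
pages have already been migrated away, so that every page in the range
sits in the buddy allocator by the time alloc_contig_freed_pages() is
called.  start_isolate_page_range(), undo_isolate_page_range() and
test_pages_isolated() are the existing page-isolation helpers from the
header touched above.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/page-isolation.h>

/* Hypothetical caller; a sketch only, not part of this patch. */
static struct page *grab_contig_range(unsigned long start, unsigned long end)
{
	unsigned long last;

	/* Mark the range MIGRATE_ISOLATE so freed pages stay free. */
	if (start_isolate_page_range(start, end))
		return NULL;

	/* ... migrate or reclaim any still-used pages in [start, end) ... */

	/* Zero means every page in the range is isolated and free. */
	if (test_pages_isolated(start, end)) {
		undo_isolate_page_range(start, end);
		return NULL;
	}

	/*
	 * Pull the pages out of the buddy allocator.  The returned pfn
	 * may lie past 'end', because whole buddy blocks are removed.
	 */
	last = alloc_contig_freed_pages(start, end, GFP_KERNEL);

	/* Give back the tail that was allocated beyond 'end'. */
	if (last > end)
		free_contig_pages(end, last - end);

	undo_isolate_page_range(start, end);
	return pfn_to_page(start);
}

The trim step is why the function returns the pfn one past the last
allocated page: a caller asking for an unaligned range can free the
overshoot with free_contig_pages() instead of leaking it.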