The patch titled
     Subject: mm/memory_hotplug: drop MHP_MEMBLOCK_API
has been removed from the -mm tree.  Its filename was
     mm-memory_hotplug-drop-mhp_memblock_api.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: David Hildenbrand <david@xxxxxxxxxx>
Subject: mm/memory_hotplug: drop MHP_MEMBLOCK_API

No longer needed, the callers of arch_add_memory() can handle this manually.

Link: http://lkml.kernel.org/r/20190527111152.16324-9-david@xxxxxxxxxx
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
Reviewed-by: Wei Yang <richardw.yang@xxxxxxxxxxxxxxx>
Acked-by: Michal Hocko <mhocko@xxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxxx>
Cc: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Qian Cai <cai@xxxxxx>
Cc: Arun KS <arunks@xxxxxxxxxxxxxx>
Cc: Mathieu Malaterre <malat@xxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
Cc: Alex Deucher <alexander.deucher@xxxxxxx>
Cc: Andrew Banman <andrew.banman@xxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Baoquan He <bhe@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Chintan Pandya <cpandya@xxxxxxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Jonathan Cameron <Jonathan.Cameron@xxxxxxxxxx>
Cc: Jun Yao <yaojun8558363@xxxxxxxxx>
Cc: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Cc: Mark Brown <broonie@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Masahiro Yamada <yamada.masahiro@xxxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Cc: "mike.travis@xxxxxxx" <mike.travis@xxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: "Rafael J. Wysocki" <rafael@xxxxxxxxxx>
Cc: Rich Felker <dalias@xxxxxxxx>
Cc: Rob Herring <robh@xxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Cc: Yu Zhao <yuzhao@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/memory_hotplug.h |    8 --------
 mm/memory_hotplug.c            |    9 +++------
 2 files changed, 3 insertions(+), 14 deletions(-)

--- a/include/linux/memory_hotplug.h~mm-memory_hotplug-drop-mhp_memblock_api
+++ a/include/linux/memory_hotplug.h
@@ -128,14 +128,6 @@ extern void arch_remove_memory(int nid,
 extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
 	unsigned long nr_pages, struct vmem_altmap *altmap);
 
-/*
- * Do we want sysfs memblock files created. This will allow userspace to online
- * and offline memory explicitly. Lack of this bit means that the caller has to
- * call move_pfn_range_to_zone to finish the initialization.
- */
-
-#define MHP_MEMBLOCK_API	(1<<0)
-
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 		struct mhp_restrictions *restrictions);
--- a/mm/memory_hotplug.c~mm-memory_hotplug-drop-mhp_memblock_api
+++ a/mm/memory_hotplug.c
@@ -251,7 +251,7 @@ void __init register_page_bootmem_info_n
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
 static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
-		struct vmem_altmap *altmap, bool want_memblock)
+		struct vmem_altmap *altmap)
 {
 	int ret;
 
@@ -294,8 +294,7 @@ int __ref __add_pages(int nid, unsigned
 	}
 
 	for (i = start_sec; i <= end_sec; i++) {
-		err = __add_section(nid, section_nr_to_pfn(i), altmap,
-				restrictions->flags & MHP_MEMBLOCK_API);
+		err = __add_section(nid, section_nr_to_pfn(i), altmap);
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -1065,9 +1064,7 @@ static int online_memory_block(struct me
  */
 int __ref add_memory_resource(int nid, struct resource *res)
 {
-	struct mhp_restrictions restrictions = {
-		.flags = MHP_MEMBLOCK_API,
-	};
+	struct mhp_restrictions restrictions = {};
 	u64 start, size;
 	bool new_node = false;
 	int ret;
_

Patches currently in -mm which might be from david@xxxxxxxxxx are
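
For context on "the callers of arch_add_memory() can handle this manually": below is a rough
sketch (not the exact mainline code) of the flow a caller such as add_memory_resource() ends
up with once this series is applied.  It assumes the create_memory_block_devices() helper
introduced earlier in the same series; the example function name is made up for illustration.

/*
 * Sketch only: memory block devices are now created by the caller, right
 * after arch_add_memory(), instead of being requested from __add_pages()
 * via the MHP_MEMBLOCK_API restrictions flag.
 */
#include <linux/memory.h>
#include <linux/memory_hotplug.h>

static int example_hotplug_range(int nid, u64 start, u64 size)
{
	struct mhp_restrictions restrictions = {};	/* no flags needed anymore */
	int ret;

	/* arch's memory hotadd; no MHP_MEMBLOCK_API flag to pass */
	ret = arch_add_memory(nid, start, size, &restrictions);
	if (ret < 0)
		return ret;

	/* create the sysfs memory block devices explicitly, after adding memory */
	ret = create_memory_block_devices(start, size);
	if (ret)
		arch_remove_memory(nid, start, size, NULL);

	return ret;
}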