The quilt patch titled
     Subject: mm/memory_hotplug: drop 'reason' argument from check_pfn_span()
has been removed from the -mm tree.  Its filename was
     mm-memory_hotplug-drop-reason-argument-from-check_pfn_span.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Subject: mm/memory_hotplug: drop 'reason' argument from check_pfn_span()
Date: Tue, 31 May 2022 14:34:41 +0530

In check_pfn_span(), a 'reason' string is being used to recreate the
caller function name while printing the warning message.  This is
unnecessary, as the warning message can simply be printed inside the
caller depending on the return code.  Currently there are just two
callers of check_pfn_span(), i.e. __add_pages() and __remove_pages().
Let's clean this up.

Link: https://lkml.kernel.org/r/20220531090441.170650-1-anshuman.khandual@xxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Acked-by: Oscar Salvador <osalvador@xxxxxxx>
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/memory_hotplug.c |   20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

--- a/mm/memory_hotplug.c~mm-memory_hotplug-drop-reason-argument-from-check_pfn_span
+++ a/mm/memory_hotplug.c
@@ -237,8 +237,7 @@ static void release_memory_resource(stru
 	kfree(res);
 }
 
-static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
-		const char *reason)
+static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
 {
 	/*
 	 * Disallow all operations smaller than a sub-section and only
@@ -255,12 +254,8 @@ static int check_pfn_span(unsigned long
 		min_align = PAGES_PER_SUBSECTION;
 	else
 		min_align = PAGES_PER_SECTION;
-	if (!IS_ALIGNED(pfn, min_align)
-			|| !IS_ALIGNED(nr_pages, min_align)) {
-		WARN(1, "Misaligned __%s_pages start: %#lx end: #%lx\n",
-		     reason, pfn, pfn + nr_pages - 1);
+	if (!IS_ALIGNED(pfn | nr_pages, min_align))
 		return -EINVAL;
-	}
 	return 0;
 }
 
@@ -337,9 +332,10 @@ int __ref __add_pages(int nid, unsigned 
 		altmap->alloc = 0;
 	}
 
-	err = check_pfn_span(pfn, nr_pages, "add");
-	if (err)
-		return err;
+	if (check_pfn_span(pfn, nr_pages)) {
+		WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
+		return -EINVAL;
+	}
 
 	for (; pfn < end_pfn; pfn += cur_nr_pages) {
 		/* Select all remaining pages up to the next section boundary */
@@ -536,8 +532,10 @@ void __remove_pages(unsigned long pfn, u
 
 	map_offset = vmem_altmap_offset(altmap);
 
-	if (check_pfn_span(pfn, nr_pages, "remove"))
+	if (check_pfn_span(pfn, nr_pages)) {
+		WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1);
 		return;
+	}
 
 	for (; pfn < end_pfn; pfn += cur_nr_pages) {
 		cond_resched();
_

Patches currently in -mm which might be from anshuman.khandual@xxxxxxx are
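
A side note on the consolidated alignment test in the patch above: the single
IS_ALIGNED(pfn | nr_pages, min_align) check relies on min_align being a power
of two, so any low-order bit set in either pfn or nr_pages survives the bitwise
OR and trips the mask test exactly as the two separate checks did.  The sketch
below is not part of the patch; it is a standalone userspace program, assuming
a locally defined IS_ALIGNED() equivalent to the kernel macro and a stand-in
min_align value of 32768 (PAGES_PER_SECTION on a common x86_64 config), that
prints both forms side by side for a few sample inputs.

/* Standalone sketch (not kernel code): compares the old two-check
 * alignment test with the new single OR-based test. */
#include <stdio.h>
#include <stdbool.h>

/* Local stand-in for the kernel's IS_ALIGNED() macro. */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	/* Stand-in for PAGES_PER_SECTION / PAGES_PER_SUBSECTION. */
	unsigned long min_align = 32768UL;
	unsigned long cases[][2] = {
		{ 0x10000UL, 0x8000UL },	/* both aligned */
		{ 0x10000UL, 0x8001UL },	/* nr_pages misaligned */
		{ 0x10001UL, 0x8000UL },	/* pfn misaligned */
	};

	for (unsigned long i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		unsigned long pfn = cases[i][0], nr_pages = cases[i][1];
		bool old_check = IS_ALIGNED(pfn, min_align) &&
				 IS_ALIGNED(nr_pages, min_align);
		bool new_check = IS_ALIGNED(pfn | nr_pages, min_align);

		/* Any misaligned bit in either operand survives the OR,
		 * so both forms agree on every input. */
		printf("pfn=%#lx nr_pages=%#lx old=%d new=%d\n",
		       pfn, nr_pages, old_check, new_check);
	}
	return 0;
}

Both columns agree for every case, which is why the patch can fold the two
IS_ALIGNED() calls into one without changing behaviour.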