Some architectures may want restrictions different from the generic
PMD_SIZE-based one. Hence add an architecture-specific override,
arch_supports_memmap_on_memory(); the default PMD_SIZE check is moved
there.

Acked-by: David Hildenbrand <david@xxxxxxxxxx>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
---
 mm/memory_hotplug.c | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index eca32ccd45cc..457824a6ecb8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1247,9 +1247,24 @@ static int online_memory_block(struct memory_block *mem, void *arg)
 	return device_online(&mem->dev);
 }
 
+#ifndef arch_supports_memmap_on_memory
+static inline bool arch_supports_memmap_on_memory(unsigned long size)
+{
+	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
+	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
+
+	/*
+	 * As default, we want the vmemmap to span a complete PMD such that we
+	 * can map the vmemmap using a single PMD if supported by the
+	 * architecture.
+	 */
+	return IS_ALIGNED(vmemmap_size, PMD_SIZE);
+}
+#endif
+
 static bool mhp_supports_memmap_on_memory(unsigned long size)
 {
-	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
+	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
 	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
 	unsigned long remaining_size = size - vmemmap_size;
 
@@ -1281,8 +1296,8 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 	 */
 	return mhp_memmap_on_memory() &&
 	       size == memory_block_size_bytes() &&
-	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
-	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
+	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)) &&
+	       arch_supports_memmap_on_memory(size);
 }
 
 /*
-- 
2.41.0
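
For illustration only (not part of the patch): with the #ifndef guard
above, an architecture overrides the default by defining the macro in a
header that mm/memory_hotplug.c pulls in, e.g. its asm/pgtable.h. A
hypothetical sketch follows; the arch name "foo" and the looser
PAGE_SIZE-alignment policy are both made up for the example:

	/* arch/foo/include/asm/pgtable.h (hypothetical) */
	static inline bool arch_supports_memmap_on_memory(unsigned long size)
	{
		unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
		unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);

		/* illustrative policy: only require a page-aligned vmemmap */
		return IS_ALIGNED(vmemmap_size, PAGE_SIZE);
	}
	#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory

The trailing #define is what makes the #ifndef in mm/memory_hotplug.c
skip the generic PMD_SIZE-based version.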