Add a new "force" mode to the memory_hotplug.memmap_on_memory module
parameter that can be selected if we want to allow pageblock alignment
by reserving pages in the vmemmap altmap area. This implies we will be
reserving some pages for every memory block. It also allows the memmap
on memory feature to be useful with a wider range of memory block size
values.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
---
 mm/memory_hotplug.c | 109 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 96 insertions(+), 13 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 5921c81fcb70..c409f5ff6a59 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -41,17 +41,85 @@
 #include "internal.h"
 #include "shuffle.h"
 
+enum {
+	MEMMAP_ON_MEMORY_DISABLE = 0,
+	MEMMAP_ON_MEMORY_ENABLE,
+	MEMMAP_ON_MEMORY_FORCE,
+};
+
+static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE;
+
+static inline unsigned long memory_block_align_base(unsigned long size)
+{
+	if (memmap_mode == MEMMAP_ON_MEMORY_FORCE) {
+		unsigned long align;
+		unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
+		unsigned long vmemmap_size;
+
+		vmemmap_size = DIV_ROUND_UP(nr_vmemmap_pages * sizeof(struct page), PAGE_SIZE);
+		align = pageblock_align(vmemmap_size) - vmemmap_size;
+		return align;
+	} else
+		return 0;
+}
+
 #ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
 /*
  * memory_hotplug.memmap_on_memory parameter
  */
-static bool memmap_on_memory __ro_after_init;
-module_param(memmap_on_memory, bool, 0444);
-MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
+static int set_memmap_mode(const char *val, const struct kernel_param *kp)
+{
+	int ret, mode;
+	bool enabled;
+
+	if (sysfs_streq(val, "force") || sysfs_streq(val, "FORCE")) {
+		mode = MEMMAP_ON_MEMORY_FORCE;
+		goto matched;
+	}
+
+	ret = kstrtobool(val, &enabled);
+	if (ret < 0)
+		return ret;
+	if (enabled)
+		mode = MEMMAP_ON_MEMORY_ENABLE;
+	else
+		mode = MEMMAP_ON_MEMORY_DISABLE;
+
+matched:
+	*((int *)kp->arg) = mode;
+	if (mode == MEMMAP_ON_MEMORY_FORCE) {
+		pr_info("Memory hotplug will reserve %ld pages in each memory block\n",
+			memory_block_align_base(memory_block_size_bytes()));
+	}
+	return 0;
+}
+
+static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
+{
+	if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
+		return sprintf(buffer, "force\n");
+	if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_ENABLE)
+		return sprintf(buffer, "y\n");
+
+	return sprintf(buffer, "n\n");
+}
+
+static const struct kernel_param_ops memmap_mode_ops = {
+	.set = set_memmap_mode,
+	.get = get_memmap_mode,
+};
+module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444);
+MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
+	"With value \"force\" it could result in memory wastage due to memmap size limitations.\n"
+	"For example, if the memmap for a memory block requires 1 MiB, but the pageblock\n"
+	"size is 2 MiB, 1 MiB of hotplugged memory will be wasted. Note that there are\n"
+	"still cases where the feature cannot be enforced: for example, if the memmap is\n"
+	"smaller than a single page, or if the architecture does not support the forced\n"
+	"mode in all configurations. (y/n/force)");
 
 static inline bool mhp_memmap_on_memory(void)
 {
-	return memmap_on_memory;
+	return !!memmap_mode;
 }
 #else
 static inline bool mhp_memmap_on_memory(void)
@@ -1264,7 +1332,6 @@ static inline bool arch_supports_memmap_on_memory(unsigned long size)
 
 static bool mhp_supports_memmap_on_memory(unsigned long size)
 {
-	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
 	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
 	unsigned long remaining_size = size - vmemmap_size;
 
@@ -1295,10 +1362,23 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
 	 * altmap as an alternative source of memory, and we do not exactly
 	 * populate a single PMD.
 	 */
-	return mhp_memmap_on_memory() &&
-	       size == memory_block_size_bytes() &&
-	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)) &&
-	       arch_supports_memmap_on_memory(size);
+	if (!mhp_memmap_on_memory() || size != memory_block_size_bytes())
+		return false;
+
+	/*
+	 * Make sure the vmemmap allocation is fully contained
+	 * so that we always allocate vmemmap memory from altmap area.
+	 */
+	if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE))
+		return false;
+	/*
+	 * Without page reservation remaining pages should be pageblock aligned.
+	 */
+	if (memmap_mode != MEMMAP_ON_MEMORY_FORCE &&
+	    !IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)))
+		return false;
+
+	return arch_supports_memmap_on_memory(size);
 }
 
 /*
@@ -1311,7 +1391,11 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 {
 	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
 	enum memblock_flags memblock_flags = MEMBLOCK_NONE;
-	struct vmem_altmap mhp_altmap = {};
+	struct vmem_altmap mhp_altmap = {
+		.base_pfn = PHYS_PFN(res->start),
+		.end_pfn = PHYS_PFN(res->end),
+		.reserve = memory_block_align_base(resource_size(res)),
+	};
 	struct memory_group *group = NULL;
 	u64 start, size;
 	bool new_node = false;
@@ -1356,8 +1440,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 	 */
 	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
 		if (mhp_supports_memmap_on_memory(size)) {
-			mhp_altmap.free = PHYS_PFN(size);
-			mhp_altmap.base_pfn = PHYS_PFN(start);
+			mhp_altmap.free = PHYS_PFN(size) - mhp_altmap.reserve;
 			params.altmap = &mhp_altmap;
 		}
 		/* fallback to not using altmap */
@@ -1369,7 +1452,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 		goto error;
 
 	/* create memory block devices after memory was added */
-	ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
+	ret = create_memory_block_devices(start, size, mhp_altmap.alloc + mhp_altmap.reserve,
 					  group);
 	if (ret) {
 		arch_remove_memory(start, size, NULL);
-- 
2.41.0