The quilt patch titled
     Subject: mm: memory_hotplug: enumerate all supported section flags
has been removed from the -mm tree.  Its filename was
     mm-memory_hotplug-enumerate-all-supported-section-flags.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Subject: mm: memory_hotplug: enumerate all supported section flags
Date: Fri, 17 Jun 2022 21:56:49 +0800

Patch series "make hugetlb_optimize_vmemmap compatible with
memmap_on_memory", v3.

This series makes hugetlb_optimize_vmemmap compatible with
memmap_on_memory.


This patch (of 2):

We are almost running out of section flags: only one bit is still free in
the worst case (powerpc with 256k pages).  However, there are still some
free bits (in ->section_mem_map) on other architectures (e.g. x86_64 has
10 bits available, arm64 has 8 bits available in the worst case of 64K
pages).  Those numbers are currently hard coded, which makes it
inconvenient to use the spare bits on architectures other than powerpc.
So convert the section flags to an enumeration of bit numbers to make it
easy to add new section flags in the future.  Also, move
SECTION_TAINT_ZONE_DEVICE into the scope of CONFIG_ZONE_DEVICE to save a
bit in the non-zone-device case.

[songmuchun@xxxxxxxxxxxxx: replace enum with defines per David]
  Link: https://lkml.kernel.org/r/20220620110616.12056-2-songmuchun@xxxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20220617135650.74901-1-songmuchun@xxxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20220617135650.74901-2-songmuchun@xxxxxxxxxxxxx
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Reviewed-by: David Hildenbrand <david@xxxxxxxxxx>
Cc: Jonathan Corbet <corbet@xxxxxxx>
Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
Cc: Xiongchun Duan <duanxiongchun@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/mmzone.h |   41 ++++++++++++++++++++++++++++++---------
 mm/memory_hotplug.c    |    6 +++++
 mm/sparse.c            |    2 -
 3 files changed, 39 insertions(+), 10 deletions(-)

--- a/include/linux/mmzone.h~mm-memory_hotplug-enumerate-all-supported-section-flags
+++ a/include/linux/mmzone.h
@@ -1418,16 +1418,32 @@ extern size_t mem_section_usage_size(voi
  * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
  * worst combination is powerpc with 256k pages,
  * which results in PFN_SECTION_SHIFT equal 6.
- * To sum it up, at least 6 bits are available.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * However, we can exceed 6 bits on some other architectures except
+ * powerpc (e.g. 15 bits are available on x86_64, 13 bits are available
+ * with the worst case of 64K pages on arm64) if we make sure the
+ * exceeded bit is not applicable to powerpc.
 */
-#define SECTION_MARKED_PRESENT	(1UL<<0)
-#define SECTION_HAS_MEM_MAP	(1UL<<1)
-#define SECTION_IS_ONLINE	(1UL<<2)
-#define SECTION_IS_EARLY	(1UL<<3)
-#define SECTION_TAINT_ZONE_DEVICE	(1UL<<4)
-#define SECTION_MAP_LAST_BIT	(1UL<<5)
-#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT	6
+enum {
+	SECTION_MARKED_PRESENT_BIT,
+	SECTION_HAS_MEM_MAP_BIT,
+	SECTION_IS_ONLINE_BIT,
+	SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+	SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+	SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT		BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP		BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE		BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY		BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE	BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#define SECTION_MAP_MASK		(~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT		SECTION_MAP_LAST_BIT
 
 static inline struct page *__section_mem_map_addr(struct mem_section *section)
 {
@@ -1466,12 +1482,19 @@ static inline int online_section(struct
 	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
 }
 
+#ifdef CONFIG_ZONE_DEVICE
 static inline int online_device_section(struct mem_section *section)
 {
 	unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
 
 	return section && ((section->section_mem_map & flags) == flags);
 }
+#else
+static inline int online_device_section(struct mem_section *section)
+{
+	return 0;
+}
+#endif
 
 static inline int online_section_nr(unsigned long nr)
 {
--- a/mm/memory_hotplug.c~mm-memory_hotplug-enumerate-all-supported-section-flags
+++ a/mm/memory_hotplug.c
@@ -670,12 +670,18 @@ static void __meminit resize_pgdat_range
 
 }
 
+#ifdef CONFIG_ZONE_DEVICE
 static void section_taint_zone_device(unsigned long pfn)
 {
 	struct mem_section *ms = __pfn_to_section(pfn);
 
 	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
 }
+#else
+static inline void section_taint_zone_device(unsigned long pfn)
+{
+}
+#endif
 
 /*
  * Associate the pfn range with the given zone, initializing the memmaps
--- a/mm/sparse.c~mm-memory_hotplug-enumerate-all-supported-section-flags
+++ a/mm/sparse.c
@@ -281,7 +281,7 @@ static unsigned long sparse_encode_mem_m
 {
 	unsigned long coded_mem_map =
 		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
-	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
+	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
 	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
 	return coded_mem_map;
 }
_

Patches currently in -mm which might be from songmuchun@xxxxxxxxxxxxx are

mm-memory_hotplug-make-hugetlb_optimize_vmemmap-compatible-with-memmap_on_memory.patch
mm-hugetlb-remove-minimum_order-variable.patch
mm-hugetlb_vmemmap-delete-hugetlb_optimize_vmemmap_enabled.patch
mm-hugetlb_vmemmap-optimize-vmemmap_optimize_mode-handling.patch
mm-hugetlb_vmemmap-introduce-the-name-hvo.patch
mm-hugetlb_vmemmap-move-vmemmap-code-related-to-hugetlb-to-hugetlb_vmemmapc.patch
mm-hugetlb_vmemmap-replace-early_param-with-core_param.patch
mm-hugetlb_vmemmap-improve-hugetlb_vmemmap-code-readability.patch
mm-hugetlb_vmemmap-move-code-comments-to-vmemmap_deduprst.patch
mm-hugetlb_vmemmap-use-ptrs_per_pte-instead-of-pmd_size-page_size.patch
mm-memcontrol-remove-dead-code-and-comments.patch
mm-rename-unlock_page_lruvec_irq-_irqrestore-to-lruvec_unlock_irq-_irqrestore.patch
mm-memcontrol-prepare-objcg-api-for-non-kmem-usage.patch
mm-memcontrol-make-lruvec-lock-safe-when-lru-pages-are-reparented.patch
mm-vmscan-rework-move_pages_to_lru.patch
mm-thp-make-split-queue-lock-safe-when-lru-pages-are-reparented.patch
mm-memcontrol-make-all-the-callers-of-foliopage_memcg-safe.patch
mm-memcontrol-introduce-memcg_reparent_ops.patch
mm-memcontrol-use-obj_cgroup-apis-to-charge-the-lru-pages.patch
mm-lru-add-vm_warn_on_once_folio-to-lru-maintenance-function.patch
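
For readers unfamiliar with how the section flags share ->section_mem_map
with the node id, below is a minimal userspace sketch of the new layout.
It is not taken from the patch: it assumes CONFIG_ZONE_DEVICE=y, redefines
BIT() locally because linux/bits.h is unavailable outside the kernel, and
uses an arbitrary node id of 3.

/* build: cc -o section_flags_demo section_flags_demo.c */
#include <stdio.h>

#define BIT(nr)	(1UL << (nr))

/* Mirror of the patch's enum, with CONFIG_ZONE_DEVICE assumed enabled. */
enum {
	SECTION_MARKED_PRESENT_BIT,
	SECTION_HAS_MEM_MAP_BIT,
	SECTION_IS_ONLINE_BIT,
	SECTION_IS_EARLY_BIT,
	SECTION_TAINT_ZONE_DEVICE_BIT,	/* compiled out without ZONE_DEVICE */
	SECTION_MAP_LAST_BIT,		/* must remain last */
};

#define SECTION_MARKED_PRESENT	BIT(SECTION_MARKED_PRESENT_BIT)
#define SECTION_IS_ONLINE	BIT(SECTION_IS_ONLINE_BIT)
#define SECTION_MAP_MASK	(~(BIT(SECTION_MAP_LAST_BIT) - 1))
#define SECTION_NID_SHIFT	SECTION_MAP_LAST_BIT

int main(void)
{
	/* Early boot stores the node id above the flag bits. */
	unsigned long map = (3UL << SECTION_NID_SHIFT) |
			    SECTION_MARKED_PRESENT | SECTION_IS_ONLINE;

	printf("flags = %#lx\n", map & ~SECTION_MAP_MASK);	/* 0x5 */
	printf("nid   = %lu\n", map >> SECTION_NID_SHIFT);	/* 3 */
	printf("online? %d\n", !!(map & SECTION_IS_ONLINE));	/* 1 */
	return 0;
}

In the kernel proper the node id occupies the upper bits only early in
boot; sparse_encode_mem_map() later stores an encoded mem_map pointer
there, and the BUILD_BUG_ON() this patch updates checks that the flag
bits stay below PFN_SECTION_SHIFT so they cannot collide with that
encoding (the BUG_ON() next to it verifies the same at run time).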