On Mon, Feb 10, 2020 at 07:05:56AM +0800, Wei Yang wrote:
>On Sun, Feb 09, 2020 at 06:48:20PM +0800, Baoquan He wrote:
>>Wrap the codes filling subsection map in section_activate() into
>>fill_subsection_map(), this makes section_activate() cleaner and
>>easier to follow.
>>
>
>This looks a preparation for #ifdef the code for VMEMMAP, then why not take
>the usage handling into this function too?
>

Oops, you are right. My mistake.

>>Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
>>---
>> mm/sparse.c | 45 ++++++++++++++++++++++++++++++++++-----------
>> 1 file changed, 34 insertions(+), 11 deletions(-)
>>
>>diff --git a/mm/sparse.c b/mm/sparse.c
>>index c184b69460b7..9ad741ccbeb6 100644
>>--- a/mm/sparse.c
>>+++ b/mm/sparse.c
>>@@ -788,24 +788,28 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
>> 		depopulate_section_memmap(pfn, nr_pages, altmap);
>> }
>>
>>-static struct page * __meminit section_activate(int nid, unsigned long pfn,
>>-		unsigned long nr_pages, struct vmem_altmap *altmap)
>>+/**
>>+ * fill_subsection_map - fill subsection map of a memory region
>>+ * @pfn - start pfn of the memory range
>>+ * @nr_pages - number of pfns to add in the region
>>+ *
>>+ * This clears the related subsection map inside one section, and only
>
>s/clears/fills/ ?
>
>>+ * intended for hotplug.
>>+ *
>>+ * Return:
>>+ * * 0		- On success.
>>+ * * -EINVAL	- Invalid memory region.
>>+ * * -EEXIST	- Subsection map has been set.
>>+ */
>>+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
>> {
>>-	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
>> 	struct mem_section *ms = __pfn_to_section(pfn);
>>-	struct mem_section_usage *usage = NULL;
>>+	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
>> 	unsigned long *subsection_map;
>>-	struct page *memmap;
>> 	int rc = 0;
>>
>> 	subsection_mask_set(map, pfn, nr_pages);
>>
>>-	if (!ms->usage) {
>>-		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>>-		if (!usage)
>>-			return ERR_PTR(-ENOMEM);
>>-		ms->usage = usage;
>>-	}
>> 	subsection_map = &ms->usage->subsection_map[0];
>>
>> 	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
>>@@ -816,6 +820,25 @@ static struct page * __meminit section_activate(int nid, unsigned long pfn,
>> 		bitmap_or(subsection_map, map, subsection_map,
>> 				SUBSECTIONS_PER_SECTION);
>>
>>+	return rc;
>>+}
>>+
>>+static struct page * __meminit section_activate(int nid, unsigned long pfn,
>>+		unsigned long nr_pages, struct vmem_altmap *altmap)
>>+{
>>+	struct mem_section *ms = __pfn_to_section(pfn);
>>+	struct mem_section_usage *usage = NULL;
>>+	struct page *memmap;
>>+	int rc = 0;
>>+
>>+	if (!ms->usage) {
>>+		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
>>+		if (!usage)
>>+			return ERR_PTR(-ENOMEM);
>>+		ms->usage = usage;
>>+	}
>>+
>>+	rc = fill_subsection_map(pfn, nr_pages);
>> 	if (rc) {
>> 		if (usage)
>> 			ms->usage = NULL;
>>-- 
>>2.17.2
>
>-- 
>Wei Yang
>Help you, Help me

-- 
Wei Yang
Help you, Help me
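
[Editor's note] For illustration only, here is a rough sketch (not a patch from this thread) of what the review suggestion could look like if the ms->usage allocation were also folded into fill_subsection_map(). It only reuses helpers already visible in the quoted diff (__pfn_to_section(), mem_section_usage_size(), subsection_mask_set(), the bitmap helpers); the local error unwinding is an assumption about how the caller's cleanup would move along with the allocation.

/*
 * Illustrative sketch only -- not the actual follow-up patch. It folds
 * the ms->usage allocation into fill_subsection_map(), as suggested in
 * the review above, and undoes that allocation locally on failure so
 * the caller only needs to check the return code.
 */
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section_usage *usage = NULL;
	unsigned long *subsection_map;
	int rc = 0;

	/* Allocate the usage struct here instead of in section_activate(). */
	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return -ENOMEM;
		ms->usage = usage;
	}

	subsection_mask_set(map, pfn, nr_pages);
	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
			  SUBSECTIONS_PER_SECTION);

	/* Undo the allocation made by this call if the map could not be set. */
	if (rc && usage) {
		ms->usage = NULL;
		kfree(usage);
	}

	return rc;
}

With a split along these lines, section_activate() would only check the returned error code and would no longer need to track whether it allocated usage itself.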