Re: [PATCH v5 7/7] hugetlb: parallelize 1G hugetlb initialization

On 2024/1/26 23:24, Gang Li wrote:
Optimize the initialization speed of 1G huge pages through
parallelization.

1G hugetlbs are allocated from bootmem, a process that is already
very fast and does not currently require optimization. Therefore,
we focus on parallelizing only the initialization phase in
`gather_bootmem_prealloc`.

Here are some test results:
       test case       no patch(ms)   patched(ms)   saved
  ------------------- -------------- ------------- --------
   256c2T(4 node) 1G           4745          2024   57.34%
   128c1T(2 node) 1G           3358          1712   49.02%
      12T         1G          77000         18300   76.23%

Signed-off-by: Gang Li <ligang.bdlg@xxxxxxxxxxxxx>
Tested-by: David Rientjes <rientjes@xxxxxxxxxx>
---
  arch/powerpc/mm/hugetlbpage.c |  2 +-
  include/linux/hugetlb.h       |  2 +-
  mm/hugetlb.c                  | 44 ++++++++++++++++++++++++++++-------
  3 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aab62..a1651d5471862 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -226,7 +226,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
  		return 0;
  	m = phys_to_virt(gpage_freearray[--nr_gpages]);
  	gpage_freearray[nr_gpages] = 0;
-	list_add(&m->list, &huge_boot_pages);
+	list_add(&m->list, &huge_boot_pages[0]);
  	m->hstate = hstate;
  	return 1;
  }
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index c1ee640d87b11..77b30a8c6076b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -178,7 +178,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
  struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
-extern struct list_head huge_boot_pages;
+extern struct list_head huge_boot_pages[MAX_NUMNODES];
/* arch callbacks */

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 385840397bce5..eee0c456f6571 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -69,7 +69,7 @@ static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
  #endif
  static unsigned long hugetlb_cma_size __initdata;
-__initdata LIST_HEAD(huge_boot_pages);
+__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
/* for command line parsing */
  static struct hstate * __initdata parsed_hstate;
@@ -3301,7 +3301,7 @@ int alloc_bootmem_huge_page(struct hstate *h, int nid)
  int __alloc_bootmem_huge_page(struct hstate *h, int nid)
  {
  	struct huge_bootmem_page *m = NULL; /* initialize for clang */
-	int nr_nodes, node;
+	int nr_nodes, node = nid;
/* do node specific alloc */
  	if (nid != NUMA_NO_NODE) {
@@ -3339,7 +3339,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
  		huge_page_size(h) - PAGE_SIZE);
  	/* Put them into a private list first because mem_map is not up yet */
  	INIT_LIST_HEAD(&m->list);
-	list_add(&m->list, &huge_boot_pages);
+	list_add(&m->list, &huge_boot_pages[node]);
  	m->hstate = h;
  	return 1;
  }
@@ -3390,8 +3390,6 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
  	/* Send list for bulk vmemmap optimization processing */
  	hugetlb_vmemmap_optimize_folios(h, folio_list);
- /* Add all new pool pages to free lists in one lock cycle */
-	spin_lock_irqsave(&hugetlb_lock, flags);
  	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
  		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
  			/*
@@ -3404,23 +3402,27 @@ static void __init prep_and_add_bootmem_folios(struct hstate *h,
  					HUGETLB_VMEMMAP_RESERVE_PAGES,
  					pages_per_huge_page(h));
  		}
+		/* Subdivide locks to achieve better parallel performance */
+		spin_lock_irqsave(&hugetlb_lock, flags);
  		__prep_account_new_huge_page(h, folio_nid(folio));
  		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
  	}
-	spin_unlock_irqrestore(&hugetlb_lock, flags);
  }
/*
   * Put bootmem huge pages into the standard lists after mem_map is up.
   * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
   */
-static void __init gather_bootmem_prealloc(void)
+static void __init gather_bootmem_prealloc_node(unsigned long start, unsigned long end, void *arg)
  {
+	int nid = start;

Sorry for being so late to notice an issue here. I have seen a comment
from PADATA which says:

    @max_threads: Max threads to use for the job, actual number may be less
                  depending on task size and minimum chunk size.

PADATA does not guarantee that gather_bootmem_prealloc_node() will be called
->max_threads times (you have initialized it to the number of NUMA nodes in
gather_bootmem_prealloc()). Therefore, we should add a loop here to initialize
multiple nodes, namely (@end - @start) of them; otherwise, we will miss
initializing some nodes.
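
Maybe something like the following untested sketch would work, assuming
gather_bootmem_prealloc_node() is reworked to take a single node id (the
wrapper name below is only illustrative):

static void __init gather_bootmem_prealloc_parallel(unsigned long start,
						    unsigned long end,
						    void *arg)
{
	int nid;

	/* Walk every node in the [start, end) chunk PADATA hands us. */
	for (nid = start; nid < end; nid++)
		gather_bootmem_prealloc_node(nid);
}

->thread_fn would then point at this wrapper, so even when PADATA merges
several nodes into a single call, no node is skipped.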

Thanks.

  	LIST_HEAD(folio_list);
  	struct huge_bootmem_page *m;
  	struct hstate *h = NULL, *prev_h = NULL;
- list_for_each_entry(m, &huge_boot_pages, list) {
+	list_for_each_entry(m, &huge_boot_pages[nid], list) {
  		struct page *page = virt_to_page(m);
  		struct folio *folio = (void *)page;
@@ -3453,6 +3455,22 @@ static void __init gather_bootmem_prealloc(void)
  	prep_and_add_bootmem_folios(h, &folio_list);
  }
+static void __init gather_bootmem_prealloc(void)
+{
+	struct padata_mt_job job = {
+		.thread_fn	= gather_bootmem_prealloc_node,
+		.fn_arg		= NULL,
+		.start		= 0,
+		.size		= num_node_state(N_MEMORY),
+		.align		= 1,
+		.min_chunk	= 1,
+		.max_threads	= num_node_state(N_MEMORY),
+		.numa_aware	= true,
+	};
+
+	padata_do_multithreaded(&job);
+}
+
  static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
  {
  	unsigned long i;
@@ -3600,6 +3618,7 @@ static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
  static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  {
  	unsigned long allocated;
+	static bool initialized __initdata;
/* skip gigantic hugepages allocation if hugetlb_cma enabled */
  	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3607,6 +3626,15 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  		return;
  	}
+	/* hugetlb_hstate_alloc_pages() is called once per hstate; initialize huge_boot_pages only once */
+	if (!initialized) {
+		int i;
+
+		for (i = 0; i < MAX_NUMNODES; i++)
+			INIT_LIST_HEAD(&huge_boot_pages[i]);
+		initialized = true;
+	}
+
  	/* do node specific alloc */
  	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
  		return;




