Re: [PATCH] mm/hugetlb: initialize order with UINT_MAX in dissolve_free_huge_pages()

On Tue, May 12, 2015 at 04:15:11PM -0700, Andrew Morton wrote:
> On Tue, 12 May 2015 09:20:35 +0000 Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx> wrote:
> 
> > Currently the initial value of order in dissolve_free_huge_pages() is 64 or 32,
> > which leads to the following warning from a static checker:
> > 
> >   mm/hugetlb.c:1203 dissolve_free_huge_pages()
> >   warn: potential right shift more than type allows '9,18,64'
> > 
> > This carries a risk of an infinite loop, because 1 << order (which can be 0) is
> > used as the step in a for-loop like this:
> > 
> >   for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
> >       ...
> > 
> > So this patch simply avoids the risk by initializing order to UINT_MAX.
> > 
> > ..
> >
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> > @@ -1188,7 +1188,7 @@ static void dissolve_free_huge_page(struct page *page)
> >   */
> >  void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
> >  {
> > -	unsigned int order = 8 * sizeof(void *);
> > +	unsigned int order = UINT_MAX;
> >  	unsigned long pfn;
> >  	struct hstate *h;
> >  
> > @@ -1200,6 +1200,7 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
> >  		if (order > huge_page_order(h))
> >  			order = huge_page_order(h);
> >  	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
> > +	VM_BUG_ON(order == UINT_MAX);
> >  	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
> >  		dissolve_free_huge_page(pfn_to_page(pfn));
> 
> Do we need to calculate this each time?  Can it be done in
> hugetlb_init_hstates(), save the result in a global?

Yes, that should work. How about the following?
This adds 4 bytes to .data for the new global variable, but reduces .text by
47 bytes, so it's a net win:

   text    data     bss     dec     hex filename                         
  28313     469   84236  113018   1b97a mm/hugetlb.o (above patch)
  28266     473   84236  112975   1b94f mm/hugetlb.o (below patch)
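
Just to illustrate the original risk in isolation, here is a tiny userspace
sketch (not kernel code; the names and the guard are made up for the example)
of what a zero scan step would mean for the pfn loop.  On a 64-bit build
8 * sizeof(void *) is 64, so if no hstate ever clamps 'order', 1 << order
shifts a 32-bit int by its full width, which is undefined behaviour and is
exactly what the static checker complains about:

	/* Illustrative only -- not part of the patch. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int order = 8 * sizeof(void *);  /* 64 on 64-bit */
		unsigned long start_pfn = 0, end_pfn = 1UL << 12, pfn;
		unsigned long step;

		/*
		 * Shifting a 32-bit 1 by 64 is undefined; treat it as the
		 * worst case, a step of 0, which would keep the loop below
		 * from ever advancing.
		 */
		step = (order < 32) ? (1UL << order) : 0;

		if (step == 0) {
			printf("order=%u -> step 0, loop would never end\n", order);
			return 1;
		}

		for (pfn = start_pfn; pfn < end_pfn; pfn += step)
			;	/* dissolve_free_huge_page() in the kernel */

		printf("scan finished at pfn=%lu\n", pfn);
		return 0;
	}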

---
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 271e4432734c..fecb8bbfe11e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,7 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+unsigned int minimum_order __read_mostly;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1189,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned int order = 8 * sizeof(void *);
 	unsigned long pfn;
-	struct hstate *h;
 
 	if (!hugepages_supported())
 		return;
 
-	/* Set scan step to minimum hugepage size */
-	for_each_hstate(h)
-		if (order > huge_page_order(h))
-			order = huge_page_order(h);
-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
 		dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1626,11 +1621,16 @@ static void __init hugetlb_init_hstates(void)
 {
 	struct hstate *h;
 
+	minimum_order = UINT_MAX;
 	for_each_hstate(h) {
+		if (minimum_order > huge_page_order(h))
+			minimum_order = huge_page_order(h);
+
 		/* oversize hugepages were init'ed in early boot */
 		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
+	VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
-- 
2.1.0
