On Thu, Feb 08, 2018 at 03:20:04PM -0800, Matthew Wilcox wrote:
> So ... we could enable ZONE_DMA32 on 32-bit architectures.  I don't know
> what side-effects that might have; it's clearly only been tested on 64-bit
> architectures so far.
>
> It might be best to just revert 19809c2da28a and the follow-on 704b862f9efd.

Alternatively, try this.  It passes GFP_DMA32 in from vmalloc_32(), regardless
of whether ZONE_DMA32 exists.  If ZONE_DMA32 doesn't exist, we clear the flag
again in __vmalloc_area_node(), after using it to determine that we shouldn't
set __GFP_HIGHMEM.

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 673942094328..91e8a95123c4 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1669,10 +1669,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
 	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
-	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
-	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
-					0 :
-					__GFP_HIGHMEM;
+	gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
+	if (!(alloc_mask & GFP_ZONEMASK))
+		alloc_mask |= __GFP_HIGHMEM;
+	if (!IS_ENABLED(CONFIG_ZONE_DMA32) && (alloc_mask & __GFP_DMA32))
+		alloc_mask &= ~__GFP_DMA32;
 
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1680,7 +1681,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
+		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
 				PAGE_KERNEL, node, area->caller);
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
@@ -1696,9 +1697,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(alloc_mask|highmem_mask);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+			page = alloc_pages_node(node, alloc_mask, 0);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1706,7 +1707,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			goto fail;
 		}
 		area->pages[i] = page;
-		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
+		if (gfpflags_allow_blocking(gfp_mask))
 			cond_resched();
 	}
@@ -1942,12 +1943,10 @@ void *vmalloc_exec(unsigned long size)
 			NUMA_NO_NODE, __builtin_return_address(0));
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
-#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#if defined(CONFIG_64BIT) && !defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
 #else
-#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
+#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
 #endif
 
 /**
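
For illustration, the gfp fixup this adds at the top of __vmalloc_area_node()
boils down to something like the helper below (a standalone sketch; the
helper name is made up and is not part of the patch):

#include <linux/gfp.h>

/*
 * Sketch only: mirrors the mask adjustment the patch adds to
 * __vmalloc_area_node().  The name vmalloc_fixup_gfp() is hypothetical.
 */
static gfp_t vmalloc_fixup_gfp(gfp_t gfp_mask)
{
	gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	/* No zone modifier requested: the pages may come from highmem. */
	if (!(alloc_mask & GFP_ZONEMASK))
		alloc_mask |= __GFP_HIGHMEM;

	/*
	 * ZONE_DMA32 isn't configured (e.g. 32-bit): drop the modifier so
	 * the page allocator falls back to the normal zone.
	 */
	if (!IS_ENABLED(CONFIG_ZONE_DMA32) && (alloc_mask & __GFP_DMA32))
		alloc_mask &= ~__GFP_DMA32;

	return alloc_mask;
}

The net effect on a 32-bit kernel without ZONE_DMA32: GFP_VMALLOC32 still
carries __GFP_DMA32, so __GFP_HIGHMEM is never added, and the modifier is then
stripped before the pages are allocated -- which is what vmalloc_32() wants.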