When the device DMA limit does not fit in the DMA32 zone it should use the
DMA zone, even when the DMA zone is stricter than needed. The same goes for
devices that can't allocate from the entire normal zone: limit them to DMA32
in that case.

Reported-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Baruch Siach <baruch@xxxxxxxxxx>
---
 kernel/dma/direct.c  | 6 +++---
 kernel/dma/swiotlb.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4480a3cd92e0..3b4be4ca3b08 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -4,7 +4,7 @@
  *
  * DMA operations that map physical memory directly without using an IOMMU.
  */
-#include <linux/memblock.h> /* for max_pfn */
+#include <linux/memblock.h>
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-map-ops.h>
@@ -59,9 +59,9 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
 	 * zones.
 	 */
 	*phys_limit = dma_to_phys(dev, dma_limit);
-	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+	if (*phys_limit < DMA_BIT_MASK(32))
 		return GFP_DMA;
-	if (*phys_limit <= DMA_BIT_MASK(32))
+	if (*phys_limit < memblock_end_of_DRAM())
 		return GFP_DMA32;
 	return 0;
 }
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index df68d29740a0..043b0ecd3e8d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -629,9 +629,9 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
 	}
 
 	gfp &= ~GFP_ZONEMASK;
-	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
+	if (phys_limit < DMA_BIT_MASK(32))
 		gfp |= __GFP_DMA;
-	else if (phys_limit <= DMA_BIT_MASK(32))
+	else if (phys_limit < memblock_end_of_DRAM())
 		gfp |= __GFP_DMA32;
 
 	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
-- 
2.43.0
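
For illustration only, below is a minimal userspace sketch of the zone
selection policy the patch introduces. It is not kernel code: DMA_BIT_MASK
is a simplified local definition, end_of_dram() stands in for
memblock_end_of_DRAM() with an assumed 8 GiB of RAM, and the example device
limits are made up.

/*
 * Minimal userspace sketch (NOT kernel code) of the zone selection
 * policy that this patch introduces in dma_direct_optimal_gfp_mask()
 * and swiotlb_alloc_tlb():
 *
 *   phys_limit <  DMA_BIT_MASK(32)        -> GFP_DMA
 *   phys_limit <  memblock_end_of_DRAM()  -> GFP_DMA32
 *   otherwise                             -> no zone restriction
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Simplified stand-in for the kernel's DMA_BIT_MASK(); valid for n < 64. */
#define DMA_BIT_MASK(n)	((1ULL << (n)) - 1)

/* Stand-in for memblock_end_of_DRAM(); 8 GiB of RAM is an assumption. */
static uint64_t end_of_dram(void)
{
	return 8ULL << 30;
}

static const char *optimal_zone(uint64_t phys_limit)
{
	/* Device cannot reach all of the DMA32 zone: fall back to the DMA zone. */
	if (phys_limit < DMA_BIT_MASK(32))
		return "GFP_DMA";
	/* Device cannot reach the end of RAM: limit it to DMA32. */
	if (phys_limit < end_of_dram())
		return "GFP_DMA32";
	return "no zone restriction";
}

int main(void)
{
	/* Example device DMA limits (made up for illustration). */
	uint64_t limits[] = {
		DMA_BIT_MASK(30),	/* 30-bit device */
		DMA_BIT_MASK(32),	/* 32-bit device */
		~0ULL,			/* device with no addressing limit */
	};

	for (unsigned int i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
		printf("phys_limit 0x%016" PRIx64 " -> %s\n",
		       limits[i], optimal_zone(limits[i]));
	return 0;
}

With the assumed 8 GiB of RAM this prints GFP_DMA for the 30-bit device,
GFP_DMA32 for the 32-bit device, and no restriction for the device that can
address all of memory.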