Prepare for supporting the DMA_ATTR_NO_KERNEL_MAPPING flag in
dma_alloc_pages.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 kernel/dma/direct.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index b5f20781d3a96f..b5d56810130b22 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -296,9 +296,10 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
 {
 	struct page *page;
-	void *ret;
 
 	if (dma_should_alloc_from_pool(dev, gfp, 0)) {
+		void *ret;
+
 		page = dma_alloc_from_pool(dev, size, &ret, gfp,
 				dma_coherent_ok);
 		if (!page)
@@ -306,7 +307,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		goto done;
 	}
 
-	page = __dma_direct_alloc_pages(dev, size, gfp);
+	page = __dma_direct_alloc_pages(dev, size, gfp | __GFP_ZERO);
 	if (!page)
 		return NULL;
 	if (PageHighMem(page)) {
@@ -320,13 +321,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 		goto out_free_pages;
 	}
 
-	ret = page_address(page);
 	if (force_dma_unencrypted(dev)) {
-		if (set_memory_decrypted((unsigned long)ret,
+		if (set_memory_decrypted((unsigned long)page_address(page),
 				1 << get_order(size)))
 			goto out_free_pages;
 	}
-	memset(ret, 0, size);
done:
 	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
 	return page;
-- 
2.28.0
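
For context (not part of the patch): after this rework the buffer is zeroed
by passing __GFP_ZERO to __dma_direct_alloc_pages() instead of an explicit
memset(), and page_address() is only evaluated in the force_dma_unencrypted()
branch, so a later change can avoid touching the kernel mapping of the pages
at all. Below is a minimal driver-side sketch of the dma_alloc_pages() API
that this backend serves; the foo_ring structure, function names, and sizes
are invented purely for illustration.

/*
 * Illustrative sketch only, not part of the patch.  The foo_ring type and
 * the foo_ring_* helpers are made up; only dma_alloc_pages(),
 * dma_free_pages(), and the dma_sync_single_*() calls are real kernel APIs.
 */
#include <linux/dma-mapping.h>

struct foo_ring {
	struct page *page;
	dma_addr_t dma;
	size_t size;
};

static int foo_ring_alloc(struct device *dev, struct foo_ring *ring)
{
	ring->size = PAGE_SIZE;

	/*
	 * Returns device-mapped pages (zeroed via __GFP_ZERO in the direct
	 * mapping code above).  The memory is not guaranteed to be coherent.
	 */
	ring->page = dma_alloc_pages(dev, ring->size, &ring->dma,
				     DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!ring->page)
		return -ENOMEM;
	return 0;
}

static void foo_ring_cpu_access(struct device *dev, struct foo_ring *ring)
{
	/* Hand ownership to the CPU before reading/writing the pages ... */
	dma_sync_single_for_cpu(dev, ring->dma, ring->size,
				DMA_BIDIRECTIONAL);

	/* ... CPU touches page_address(ring->page) here ... */

	/* ... and back to the device before the next DMA transfer. */
	dma_sync_single_for_device(dev, ring->dma, ring->size,
				   DMA_BIDIRECTIONAL);
}

static void foo_ring_free(struct device *dev, struct foo_ring *ring)
{
	dma_free_pages(dev, ring->size, ring->page, ring->dma,
		       DMA_BIDIRECTIONAL);
}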