From: Liu Song <liusong@xxxxxxxxxxxxxxxxx>

Both dma_alloc_from_dev_coherent() and dma_direct_alloc() already set
the memory they allocate to 0. Introduce a helper,
use_dev_coherent_memory(), to tell whether the memory was allocated by
dma_alloc_from_dev_coherent(), and use get_dma_ops() to tell whether it
was allocated by dma_direct_alloc() (i.e. no dma_map_ops installed).
With this change, memory allocated through dma_pool_zalloc() avoids a
duplicate memset.

Signed-off-by: Liu Song <liusong@xxxxxxxxxxxxxxxxx>
---
 include/linux/dma-map-ops.h | 5 +++++
 mm/dmapool.c                | 5 ++++-
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d5b06b..c29948d 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -171,6 +171,10 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, size_t size, int *ret);
+static inline bool use_dev_coherent_memory(struct device *dev)
+{
+	return dev->dma_mem ? true : false;
+}
 #else
 static inline int dma_declare_coherent_memory(struct device *dev,
 		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
@@ -180,6 +184,7 @@ static inline int dma_declare_coherent_memory(struct device *dev,
 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+#define use_dev_coherent_memory(dev) (0)
 #endif /* CONFIG_DMA_DECLARE_COHERENT */
 
 #ifdef CONFIG_DMA_GLOBAL_POOL
diff --git a/mm/dmapool.c b/mm/dmapool.c
index a7eb5d0..6e03530 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -21,6 +21,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/dmapool.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -372,7 +373,9 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #endif
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (want_init_on_alloc(mem_flags))
+	if (want_init_on_alloc(mem_flags) &&
+	    !use_dev_coherent_memory(pool->dev) &&
+	    get_dma_ops(pool->dev))
 		memset(retval, 0, pool->size);
 
 	return retval;
-- 
1.8.3.1
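
For context, a minimal usage sketch (not part of the patch; the function
name example_get_desc(), the device pointer, the pool name and the sizes
are all made up for illustration). dma_pool_zalloc() is dma_pool_alloc()
with __GFP_ZERO, so want_init_on_alloc() is true for this allocation;
with the change above, the extra memset() in dma_pool_alloc() only runs
when the pool pages are neither dev-coherent memory nor obtained via
dma_direct_alloc(), both of which already return zeroed memory.

#include <linux/device.h>
#include <linux/dmapool.h>

static void *example_get_desc(struct device *dev, dma_addr_t *dma)
{
	struct dma_pool *pool;
	void *desc;

	/* 256-byte blocks, 64-byte aligned, no boundary-crossing limit */
	pool = dma_pool_create("example-descs", dev, 256, 64, 0);
	if (!pool)
		return NULL;

	/*
	 * Returns zeroed memory in either case; only whether an extra
	 * memset() is performed inside dma_pool_alloc() differs.
	 */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, dma);
	if (!desc)
		dma_pool_destroy(pool);

	return desc;
}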