A helper to find the backing page array based on a virtual address.
This also ensures we do the same vm_flags check everywhere instead
of slightly different or missing ones in a few places.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/arm/mm/dma-mapping.c   |  7 +------
 drivers/iommu/dma-iommu.c   | 15 +++------------
 include/linux/dma-mapping.h |  1 +
 kernel/dma/remap.c          | 13 +++++++++++--
 4 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 647fd25d2aba..7620d4f55e92 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1434,18 +1434,13 @@ static struct page **__atomic_get_pages(void *addr)
 
 static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 {
-	struct vm_struct *area;
-
 	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
 		return __atomic_get_pages(cpu_addr);
 
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return cpu_addr;
 
-	area = find_vm_area(cpu_addr);
-	if (area && (area->flags & VM_DMA_COHERENT))
-		return area->pages;
-	return NULL;
+	return dma_common_find_pages(cpu_addr);
 }
 
 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cea561897086..002d3bb6254a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -543,15 +543,6 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
-static struct page **__iommu_dma_get_pages(void *cpu_addr)
-{
-	struct vm_struct *area = find_vm_area(cpu_addr);
-
-	if (!area || !area->pages)
-		return NULL;
-	return area->pages;
-}
-
 /**
  * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
@@ -940,7 +931,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	 * If it the address is remapped, then it's either non-coherent
 	 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
 	 */
-	pages = __iommu_dma_get_pages(cpu_addr);
+	pages = dma_common_find_pages(cpu_addr);
 	if (!pages)
 		page = vmalloc_to_page(cpu_addr);
 	dma_common_free_remap(cpu_addr, alloc_size);
@@ -1050,7 +1041,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
-		struct page **pages = __iommu_dma_get_pages(cpu_addr);
+		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages)
 			return __iommu_dma_mmap(pages, size, vma);
@@ -1072,7 +1063,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	int ret;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
-		struct page **pages = __iommu_dma_get_pages(cpu_addr);
+		struct page **pages = dma_common_find_pages(cpu_addr);
 
 		if (pages) {
 			return sg_alloc_table_from_pages(sgt, pages,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ac320b7cacfd..cb07d1388d66 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -615,6 +615,7 @@ extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
 
+struct page **dma_common_find_pages(void *cpu_addr);
 void *dma_common_contiguous_remap(struct page *page, size_t size,
 			pgprot_t prot, const void *caller);
 
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 51958d21c810..52cdca386de0 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -11,6 +11,15 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
+struct page **dma_common_find_pages(void *cpu_addr)
+{
+	struct vm_struct *area = find_vm_area(cpu_addr);
+
+	if (!area || area->flags != VM_DMA_COHERENT)
+		return NULL;
+	return area->pages;
+}
+
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
 			size_t size, pgprot_t prot, const void *caller)
 {
@@ -78,9 +87,9 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
  */
 void dma_common_free_remap(void *cpu_addr, size_t size)
 {
-	struct vm_struct *area = find_vm_area(cpu_addr);
+	struct page **pages = dma_common_find_pages(cpu_addr);
 
-	if (!area || area->flags != VM_DMA_COHERENT) {
+	if (!pages) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
-- 
2.20.1
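
For illustration only, a minimal sketch of how a caller might use the new
helper instead of open-coding find_vm_area() plus a VM_DMA_COHERENT flags
check; the function name foo_mmap_remapped() is made up and not part of
this patch, it simply mirrors the mmap-style callers converted above:

/*
 * Illustrative sketch (not part of the patch): map the backing pages of
 * a dma_common_*_remap() area into userspace.  Returns -ENXIO if the
 * address is not a VM_DMA_COHERENT remap.
 */
static int foo_mmap_remapped(void *cpu_addr, size_t size,
		struct vm_area_struct *vma)
{
	struct page **pages = dma_common_find_pages(cpu_addr);

	if (!pages)
		return -ENXIO;	/* not a coherent DMA remap */
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}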