Use the generic dma remapping allocator instead of open coding it.  This
also avoids setting up page tables from irq context, which is generally
dangerous, and uses the atomic pool instead.

Note that this changes the kernel virtual address at which the dma
coherent memory is mapped from the DVMA_VADDR region to the general
vmalloc pool.  I could not find any indication that this matters for
the hardware.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/sparc/Kconfig         |  3 ++-
 arch/sparc/kernel/ioport.c | 54 --------------------------------------
 2 files changed, 2 insertions(+), 55 deletions(-)

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index b120ed947f50b..66fc08646be5e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -53,8 +53,9 @@ config SPARC32
 	def_bool !64BIT
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
-	select GENERIC_ATOMIC64
 	select CLZ_TAB
+	select DMA_DIRECT_REMAP
+	select GENERIC_ATOMIC64
 	select HAVE_UID16
 	select OLD_SIGACTION
 	select ZONE_DMA
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 3eb748e862220..57a72c46eddb0 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -300,60 +300,6 @@ arch_initcall(sparc_register_ioport);
 
 #endif /* CONFIG_SBUS */
 
-
-/* Allocate and map kernel buffer using consistent mode DMA for a device.
- * hwdev should be valid struct pci_dev pointer for PCI devices.
- */
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
-{
-	unsigned long addr;
-	void *va;
-
-	if (!size || size > 256 * 1024)	/* __get_free_pages() limit */
-		return NULL;
-
-	size = PAGE_ALIGN(size);
-	va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
-	if (!va) {
-		printk("%s: no %zd pages\n", __func__, size >> PAGE_SHIFT);
-		return NULL;
-	}
-
-	addr = sparc_dma_alloc_resource(dev, size);
-	if (!addr)
-		goto err_nomem;
-
-	srmmu_mapiorange(0, virt_to_phys(va), addr, size);
-
-	*dma_handle = virt_to_phys(va);
-	return (void *)addr;
-
-err_nomem:
-	free_pages((unsigned long)va, get_order(size));
-	return NULL;
-}
-
-/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned arch_dma_alloc, size must be the same as what
- * was passed into arch_dma_alloc, and likewise dma_addr must be the same as
- * what *dma_ndler was set to.
- *
- * References to the memory and mappings associated with cpu_addr/dma_addr
- * past this call are illegal.
- */
-void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_addr, unsigned long attrs)
-{
-	size = PAGE_ALIGN(size);
-
-	if (!sparc_dma_free_resource(cpu_addr, size))
-		return;
-
-	srmmu_unmapiorange((unsigned long)cpu_addr, size);
-	free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
-}
-
 /*
  * IIep is write-through, not flushing on cpu to device transfer.
  *
-- 
2.30.2
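
For reference, a minimal hypothetical driver-side sketch (not part of the patch)
of how the change is observed by consumers: callers keep using the existing
generic DMA API, and with DMA_DIRECT_REMAP selected those calls are now served
by the generic remapping allocator (or by the atomic pool for GFP_ATOMIC
allocations) instead of the removed arch_dma_alloc()/arch_dma_free().  The
example_* names below are made up for illustration only.

#include <linux/dma-mapping.h>

/*
 * Hypothetical caller: allocate a coherent buffer from process context.
 * On sparc32 the kernel mapping for this buffer now comes from the
 * generic remapping allocator (vmalloc area) rather than DVMA_VADDR.
 */
static void *example_alloc_buf(struct device *dev, size_t size,
			       dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

static void example_free_buf(struct device *dev, size_t size,
			     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}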