The cacheflag argument to __ioremap is always 0, so just implement
ioremap directly.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/nios2/include/asm/io.h | 20 ++++----------------
 arch/nios2/mm/ioremap.c     | 17 +++--------------
 2 files changed, 7 insertions(+), 30 deletions(-)

diff --git a/arch/nios2/include/asm/io.h b/arch/nios2/include/asm/io.h
index 9010243077ab..74ab34aa6731 100644
--- a/arch/nios2/include/asm/io.h
+++ b/arch/nios2/include/asm/io.h
@@ -25,29 +25,17 @@
 #define writew_relaxed(x, addr) writew(x, addr)
 #define writel_relaxed(x, addr) writel(x, addr)
 
-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-			unsigned long cacheflag);
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
 extern void __iounmap(void __iomem *addr);
 
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
 static inline void iounmap(void __iomem *addr)
 {
 	__iounmap(addr);
 }
 
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+#define ioremap_nocache ioremap
+#define ioremap_wc ioremap
+#define ioremap_wt ioremap
 
 /* Pages to physical address... */
 #define page_to_phys(page) virt_to_phys(page_to_virt(page))
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
index 3a28177a01eb..7a1a27f3daa3 100644
--- a/arch/nios2/mm/ioremap.c
+++ b/arch/nios2/mm/ioremap.c
@@ -112,8 +112,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 /*
  * Map some physical address range into the kernel address space.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long offset;
@@ -139,15 +138,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	/*
-	 * Map uncached objects in the low part of address space to
-	 * CONFIG_NIOS2_IO_REGION_BASE
-	 */
-	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
-	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
-	    !(cacheflag & _PAGE_CACHED))
-		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
 	/* Mappings have to be page-aligned */
 	offset = phys_addr & ~PAGE_MASK;
 	phys_addr &= PAGE_MASK;
@@ -158,14 +148,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size,
-			cacheflag)) {
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
 		vunmap(addr);
 		return NULL;
 	}
 	return (void __iomem *) (offset + (char *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
 
 /*
  * __iounmap unmaps nearly everything, so be careful
-- 
2.20.1
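
[Illustration only, not part of the patch: a minimal sketch of a nios2
caller, showing that the external API is unchanged by this cleanup. The
device base address, size, and register offset below are hypothetical.]

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical register block; the numbers are made up for illustration. */
#define EXAMPLE_PHYS_BASE	0x10000000UL
#define EXAMPLE_REG_SIZE	0x1000UL
#define EXAMPLE_CTRL_REG	0x04

static int example_probe(void)
{
	void __iomem *regs;

	/*
	 * Callers keep using ioremap()/iounmap(); only the implementation
	 * behind them changes, since the old __ioremap() cacheflag argument
	 * was always 0 anyway.
	 */
	regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + EXAMPLE_CTRL_REG);	/* uncached MMIO write */

	iounmap(regs);
	return 0;
}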