By selecting GENERIC_IOREMAP, the generic ioremap_prot() and iounmap()
become visible and available to the architecture. The architecture only
needs to provide arch_ioremap() or arch_iounmap() if it requires
arch-specific handling in its ioremap() or iounmap(). This simplifies
the implementation by removing code duplicated from the generic
ioremap() and iounmap(), while keeping the functionality equivalent to
before.

For xtensa, add the arch_ioremap() and arch_iounmap() hooks to carry
out xtensa's special handling during ioremap() and iounmap(), then
define and implement its own ioremap() and ioremap_cache() via
ioremap_prot().

Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
Cc: Chris Zankel <chris@xxxxxxxxxx>
Cc: Max Filippov <jcmvbkbc@xxxxxxxxx>
Cc: linux-xtensa@xxxxxxxxxxxxxxxx
---
 arch/xtensa/Kconfig          |  1 +
 arch/xtensa/include/asm/io.h | 39 +++++++++++--------------
 arch/xtensa/mm/ioremap.c     | 56 ++++++++----------------------------
 3 files changed, 30 insertions(+), 66 deletions(-)

diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 12ac277282ba..ee3a638f5458 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -29,6 +29,7 @@ config XTENSA
 	select GENERIC_LIB_UCMPDI2
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select GENERIC_IOREMAP if MMU
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
 	select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index a5b707e1c0f4..126af8de5722 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -16,6 +16,7 @@
 #include <asm/vectors.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/pgtable.h>
 
 #include <linux/types.h>
 
@@ -23,23 +24,29 @@
 #define IO_SPACE_LIMIT ~0
 #define PCI_IOBASE		((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
 
-#ifdef CONFIG_MMU
-
-void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
-void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
-void xtensa_iounmap(volatile void __iomem *addr);
-
 /*
- * Return the virtual address for the specified bus memory.
+ * I/O memory mapping functions.
 */
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
+#define arch_ioremap arch_ioremap
+
+bool arch_iounmap(void __iomem *addr);
+#define arch_iounmap arch_iounmap
+
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
+			   unsigned long prot);
+
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
 	else
-		return xtensa_ioremap_nocache(offset, size);
+		return ioremap_prot(offset, size,
+				    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 }
+#define ioremap ioremap
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
 		unsigned long size)
@@ -48,22 +55,10 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
 		return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
 	else
-		return xtensa_ioremap_cache(offset, size);
-}
-#define ioremap_cache ioremap_cache
-
-static inline void iounmap(volatile void __iomem *addr)
-{
-	unsigned long va = (unsigned long) addr;
+		return ioremap_prot(offset, size, pgprot_val(PAGE_KERNEL));
 
-	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
-	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
-	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
-	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
-		xtensa_iounmap(addr);
 }
-
-#endif /* CONFIG_MMU */
+#define ioremap_cache ioremap_cache
 
 #include <asm-generic/io.h>
 
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index a400188c16b9..b76c3f264f6f 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -6,60 +6,28 @@
  */
 
 #include <linux/io.h>
-#include <linux/vmalloc.h>
 #include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
-				    pgprot_t prot)
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
 {
-	unsigned long offset = paddr & ~PAGE_MASK;
-	unsigned long pfn = __phys_to_pfn(paddr);
-	struct vm_struct *area;
-	unsigned long vaddr;
-	int err;
-
-	paddr &= PAGE_MASK;
-
+	unsigned long pfn = __phys_to_pfn((*paddr));
 	WARN_ON(pfn_valid(pfn));
 
-	size = PAGE_ALIGN(offset + size);
-
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-
-	vaddr = (unsigned long)area->addr;
-	area->phys_addr = paddr;
-
-	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
-
-	if (err) {
-		vunmap((void *)vaddr);
-		return NULL;
-	}
-
-	flush_cache_vmap(vaddr, vaddr + size);
-	return (void __iomem *)(offset + vaddr);
-}
-
-void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
-{
-	return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+	return NULL;
 }
-EXPORT_SYMBOL(xtensa_ioremap_nocache);
 
-void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+bool arch_iounmap(void __iomem *addr)
 {
-	return xtensa_ioremap(addr, size, PAGE_KERNEL);
-}
-EXPORT_SYMBOL(xtensa_ioremap_cache);
+	unsigned long va = (unsigned long) addr;
 
-void xtensa_iounmap(volatile void __iomem *io_addr)
-{
-	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+	if ((va >= XCHAL_KIO_CACHED_VADDR &&
+	     va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) ||
+	    (va >= XCHAL_KIO_BYPASS_VADDR &&
+	     va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+		return false;
 
-	vunmap(addr);
+	return true;
 }
-EXPORT_SYMBOL(xtensa_iounmap);
-- 
2.34.1
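
For reference, a minimal sketch of the hook contract assumed above, as
inferred from the xtensa implementation in this patch: a NULL return
from arch_ioremap() means "fall through to the generic mapping path",
and a false return from arch_iounmap() means "leave the mapping alone"
(it is one of the fixed KIO windows). This is illustrative only, not
the actual generic code added by the GENERIC_IOREMAP series; the
generic path shown is modelled on the xtensa_ioremap()/xtensa_iounmap()
code removed above.

#include <linux/io.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
			   unsigned long prot)
{
	unsigned long offset = paddr & ~PAGE_MASK;
	struct vm_struct *area;
	void __iomem *base;
	unsigned long vaddr;

	/* Give the architecture a chance to handle or adjust the request. */
	base = arch_ioremap(&paddr, size, &prot);
	if (base)
		return base;

	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long)area->addr;
	area->phys_addr = paddr;

	if (ioremap_page_range(vaddr, vaddr + size, paddr, __pgprot(prot))) {
		vunmap((void *)vaddr);
		return NULL;
	}

	flush_cache_vmap(vaddr, vaddr + size);
	return (void __iomem *)(offset + vaddr);
}

void iounmap(volatile void __iomem *addr)
{
	/* Fixed/architecture-private mappings are left in place. */
	if (arch_iounmap((void __iomem *)addr))
		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}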