Add the hooks arch_ioremap() and arch_iounmap() to cover sh's special
handling in ioremap() and iounmap(), then convert ioremap_cache() to use
ioremap_prot() from GENERIC_IOREMAP.

Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Cc: Rich Felker <dalias@xxxxxxxx>
Cc: linux-sh@xxxxxxxxxxxxxxx
---
 arch/sh/Kconfig          |  1 +
 arch/sh/include/asm/io.h | 47 +++++++++----------------------
 arch/sh/mm/ioremap.c     | 61 ++++++++--------------------------------
 3 files changed, 26 insertions(+), 83 deletions(-)

diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5f220e903e5a..b63ad4698cf8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -25,6 +25,7 @@ config SUPERH
 	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GUP_GET_PTE_LOW_HIGH if X2TLB
+	select GENERIC_IOREMAP if MMU
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index fba90e670ed4..3c5ff82a511a 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -242,45 +242,26 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #define phys_to_virt(address)	(__va(address))
 #endif
 
-#ifdef CONFIG_MMU
-void iounmap(void __iomem *addr);
-void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
-			       pgprot_t prot, void *caller);
-
-static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
-				__builtin_return_address(0));
-}
-
-static inline void __iomem *
-ioremap_cache(phys_addr_t offset, unsigned long size)
-{
-	return __ioremap_caller(offset, size, PAGE_KERNEL,
-				__builtin_return_address(0));
-}
-#define ioremap_cache ioremap_cache
+/*
+ * I/O memory mapping functions.
+ */
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val);
+#define arch_ioremap arch_ioremap
 
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
-		unsigned long flags)
-{
-	return __ioremap_caller(offset, size, __pgprot(flags),
-				__builtin_return_address(0));
-}
-#endif /* CONFIG_HAVE_IOREMAP_PROT */
+int arch_iounmap(void __iomem *addr);
+#define arch_iounmap arch_iounmap
 
-#else /* CONFIG_MMU */
-static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
-{
-	return (void __iomem *)(unsigned long)offset;
-}
+#define _PAGE_IOREMAP pgprot_val(PAGE_KERNEL_NOCACHE)
 
-static inline void iounmap(volatile void __iomem *addr) { }
-#endif /* CONFIG_MMU */
+#define ioremap_cache(addr, size)  \
+	ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
+#define ioremap_cache ioremap_cache
 
 #define ioremap_uc	ioremap
 
+#include <asm-generic/io.h>
+
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 21342581144d..720a9186b06b 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -72,22 +72,12 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
 #define __ioremap_29bit(offset, size, prot)	NULL
 #endif /* CONFIG_29BIT */
 
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ref
-__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
-		 pgprot_t pgprot, void *caller)
+void __iomem *
+arch_ioremap(phys_addr_t *paddr, size_t size, unsigned long *prot_val)
 {
-	struct vm_struct *area;
-	unsigned long offset, last_addr, addr, orig_addr;
+	unsigned long last_addr, phys_addr = *paddr;
 	void __iomem *mapped;
+	pgprot_t pgprot = __pgprot(*prot_val);
 
 	mapped = __ioremap_trapped(phys_addr, size);
 	if (mapped)
@@ -100,7 +90,7 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
-		return NULL;
+		return IOMEM_ERR_PTR(-EINVAL);
 
 	/*
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
@@ -116,30 +106,8 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	if (mapped && !IS_ERR(mapped))
 		return mapped;
 
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area_caller(size, VM_IOREMAP, caller);
-	if (!area)
-		return NULL;
-	area->phys_addr = phys_addr;
-	orig_addr = addr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
-		vunmap((void *)orig_addr);
-		return NULL;
-	}
-
-	return (void __iomem *)(offset + (char *)orig_addr);
+	return NULL;
 }
-EXPORT_SYMBOL(__ioremap_caller);
 
 /*
  * Simple checks for non-translatable mappings.
  */
@@ -158,7 +126,7 @@ static inline int iomapping_nontranslatable(unsigned long offset)
 	return 0;
 }
 
-void iounmap(void __iomem *addr)
+int arch_iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
 	struct vm_struct *p;
@@ -167,26 +135,19 @@
 	 * Nothing to do if there is no translatable mapping.
 	 */
 	if (iomapping_nontranslatable(vaddr))
-		return;
+		return -EINVAL;
 
 	/*
 	 * There's no VMA if it's from an early fixed mapping.
 	 */
 	if (iounmap_fixed(addr) == 0)
-		return;
+		return -EINVAL;
 
 	/*
 	 * If the PMB handled it, there's nothing else to do.
 	 */
 	if (pmb_unmap(addr) == 0)
-		return;
+		return -EINVAL;
 
-	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
-	if (!p) {
-		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
-		return;
-	}
-
-	kfree(p);
+	return 0;
 }
-EXPORT_SYMBOL(iounmap);
-- 
2.34.1
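
The generic GENERIC_IOREMAP side is not part of this diff, so below is a
minimal sketch of how the new hooks are assumed to be consumed by the common
ioremap_prot()/iounmap() path. The hook contract is inferred from the sh code
above (NULL from arch_ioremap() means "fall through to the generic mapping",
an IOMEM_ERR_PTR() means "fail"; 0 from arch_iounmap() means "let the generic
code unmap the vmalloc area"). The fallback body mirrors the code removed from
__ioremap_caller(); the exact mm/ioremap.c implementation may differ, and this
is illustrative only, not the merged generic code.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: generic ioremap_prot(), consuming the arch_ioremap() hook. */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot_val)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	void __iomem *base;

	/* Let the architecture handle or reject the request first. */
	base = arch_ioremap(&phys_addr, size, &prot_val);
	if (IS_ERR(base))
		return NULL;		/* e.g. zero size or wrap-around */
	if (base)
		return base;		/* 29-bit, fixmap or PMB mapping */

	/*
	 * Otherwise create the page-aligned vmalloc-space mapping, as the
	 * removed tail of __ioremap_caller() used to do.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       __pgprot(prot_val))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

/* Sketch: generic iounmap(), consuming the arch_iounmap() hook. */
void iounmap(void __iomem *addr)
{
	/* A negative return means the arch already tore the mapping down. */
	if (arch_iounmap(addr) < 0)
		return;

	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}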