Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
---
 arch/powerpc/Kconfig          |  1 +
 arch/powerpc/include/asm/io.h | 11 ++++++++---
 arch/powerpc/mm/ioremap.c     | 26 +-------------------------
 arch/powerpc/mm/ioremap_32.c  | 25 ++++++++++---------------
 arch/powerpc/mm/ioremap_64.c  | 22 +++++++++-------------
 5 files changed, 29 insertions(+), 56 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 81c9f895d690..c8704933ae5a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -179,6 +179,7 @@ config PPC
 	select GENERIC_CPU_VULNERABILITIES	if PPC_BARRIER_NOSPEC
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_GETTIMEOFDAY
+	select GENERIC_IOREMAP
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
 	select GENERIC_PCI_IOMAP		if PCI
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index fc112a91d0c2..3d38f46ade16 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -859,8 +859,8 @@ static inline void iosync(void)
  *
  */
 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
-				  unsigned long flags);
+#define ioremap ioremap
+#define ioremap_prot ioremap_prot
 extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
 #define ioremap_wc ioremap_wc
 
@@ -874,7 +874,12 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
 #define ioremap_cache(addr, size) \
 	ioremap_prot((addr), (size), pgprot_val(PAGE_KERNEL))
 
-extern void iounmap(volatile void __iomem *addr);
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+#define iounmap iounmap
+#endif
+
+bool iounmap_allowed(void *addr);
+#define iounmap_allowed iounmap_allowed
 
 void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size);
 
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 4f12504fb405..705e8e8ffde4 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -41,7 +41,7 @@ void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
 	return __ioremap_caller(addr, size, prot, caller);
 }
 
-void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long flags)
 {
 	pte_t pte = __pte(flags);
 	void *caller = __builtin_return_address(0);
@@ -74,27 +74,3 @@ int early_ioremap_range(unsigned long ea, phys_addr_t pa,
 
 	return 0;
 }
-
-void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
-			 pgprot_t prot, void *caller)
-{
-	struct vm_struct *area;
-	int ret;
-	unsigned long va;
-
-	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
-	if (area == NULL)
-		return NULL;
-
-	area->phys_addr = pa;
-	va = (unsigned long)area->addr;
-
-	ret = ioremap_page_range(va, va + size, pa, prot);
-	if (!ret)
-		return (void __iomem *)area->addr + offset;
-
-	vunmap_range(va, va + size);
-	free_vm_area(area);
-
-	return NULL;
-}
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 9d13143b8be4..e9a60ddfd6b3 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -21,6 +21,13 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	phys_addr_t p, offset;
 	int err;
 
+	/*
+	 * If the address lies within the first 16 MB, assume it's in ISA
+	 * memory space
+	 */
+	if (addr < SZ_16M)
+		addr += _ISA_MEM_BASE;
+
 	/*
 	 * Choose an address to map it to.
 	 * Once the vmalloc system is running, we use it.
@@ -31,13 +38,6 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	offset = addr & ~PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - p;
 
-	/*
-	 * If the address lies within the first 16 MB, assume it's in ISA
-	 * memory space
-	 */
-	if (p < 16 * 1024 * 1024)
-		p += _ISA_MEM_BASE;
-
 #ifndef CONFIG_CRASH_DUMP
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using.
@@ -63,7 +63,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 		return (void __iomem *)v + offset;
 
 	if (slab_is_available())
-		return do_ioremap(p, offset, size, prot, caller);
+		return generic_ioremap_prot(addr, size, prot);
 
 	/*
 	 * Should check if it is a candidate for a BAT mapping
@@ -78,16 +78,11 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
 	return (void __iomem *)ioremap_bot + offset;
 }
 
-void iounmap(volatile void __iomem *addr)
+bool iounmap_allowed(void *addr)
 {
 	/*
 	 * If mapped by BATs then there is nothing to do.
 	 * Calling vfree() generates a benign warning.
 	 */
-	if (v_block_mapped((unsigned long)addr))
-		return;
-
-	if (addr > high_memory && (unsigned long)addr < ioremap_bot)
-		vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+	return !v_block_mapped((unsigned long)addr);
 }
-EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index 3acece00b33e..a319d40253e5 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -29,7 +29,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
 		return NULL;
 
 	if (slab_is_available())
-		return do_ioremap(paligned, offset, size, prot, caller);
+		return generic_ioremap_prot(addr, size, prot);
 
 	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
 
@@ -47,19 +47,15 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
  * Unmap an IO region and remove it from vmalloc'd list.
  * Access to IO memory should be serialized by driver.
  */
-void iounmap(volatile void __iomem *token)
+bool iounmap_allowed(void *addr)
 {
-	void *addr;
-
-	if (!slab_is_available())
-		return;
-
-	addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);
+	return slab_is_available();
+}
 
-	if ((unsigned long)addr < ioremap_bot) {
-		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
-		return;
-	}
-	vunmap(addr);
+#ifdef CONFIG_PPC_INDIRECT_MMIO
+void iounmap(volatile void __iomem *token)
+{
+	generic_iounmap(PCI_FIX_ADDR(token));
 }
 EXPORT_SYMBOL(iounmap);
+#endif
-- 
2.37.1
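
For context, the generic path adopted here does essentially what the removed do_ioremap() did: carve out a VM_IOREMAP area and map it with ioremap_page_range(), while iounmap_allowed() lets the architecture veto an unmap (e.g. BAT-mapped regions on 32-bit, early bolted mappings on 64-bit). The sketch below is an illustration only, not the actual mm/ioremap.c code; the sketch_* names are invented for this note, and only iounmap_allowed() comes from the hook this patch adds.

#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Illustrative sketch of the GENERIC_IOREMAP mapping path. */
static void __iomem *sketch_generic_ioremap_prot(phys_addr_t phys_addr,
						 size_t size, pgprot_t prot)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;
	struct vm_struct *area;
	unsigned long vaddr;

	/* Page-align the request, keeping the sub-page offset for the caller */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long)area->addr;

	/* Wire the physical range into the new virtual area */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}

/* Illustrative sketch of the matching unmap path. */
static void sketch_generic_iounmap(volatile void __iomem *addr)
{
	void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);

	/* Arch hook from this patch: refuse e.g. BAT or bolted mappings */
	if (!iounmap_allowed(vaddr))
		return;

	if (is_vmalloc_addr(vaddr))
		vunmap(vaddr);
}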