On Mon, May 15, 2023 at 05:08:38PM +0800, Baoquan He wrote:
> By taking the GENERIC_IOREMAP method, the generic generic_ioremap_prot(),
> generic_iounmap(), and their generic wrappers ioremap_prot(), ioremap()
> and iounmap() are all visible and available to the arch. The arch needs
> to provide wrapper functions to override the generic versions if there's
> arch-specific handling in its ioremap_prot(), ioremap() or iounmap().
> This change simplifies the implementation by removing code duplicated
> with generic_ioremap_prot() and generic_iounmap(), and keeps the
> functionality equivalent to before.
> 
> Here, add wrapper functions ioremap_prot() and iounmap() for arc's
> special handling in ioremap_prot() and iounmap().
> 
> Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
> Cc: Vineet Gupta <vgupta@xxxxxxxxxx>
> Cc: linux-snps-arc@xxxxxxxxxxxxxxxxxxx

Reviewed-by: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>

> ---
>  arch/arc/Kconfig          |  1 +
>  arch/arc/include/asm/io.h |  7 +++---
>  arch/arc/mm/ioremap.c     | 49 ++++-----------------------------------
>  3 files changed, 8 insertions(+), 49 deletions(-)
> 
> diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
> index ab6d701365bb..3a666ee0c0bc 100644
> --- a/arch/arc/Kconfig
> +++ b/arch/arc/Kconfig
> @@ -26,6 +26,7 @@ config ARC
>  	select GENERIC_PENDING_IRQ if SMP
>  	select GENERIC_SCHED_CLOCK
>  	select GENERIC_SMP_IDLE_THREAD
> +	select GENERIC_IOREMAP
>  	select HAVE_ARCH_KGDB
>  	select HAVE_ARCH_TRACEHOOK
>  	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
> diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
> index 80347382a380..4fdb7350636c 100644
> --- a/arch/arc/include/asm/io.h
> +++ b/arch/arc/include/asm/io.h
> @@ -21,8 +21,9 @@
>  #endif
> 
>  extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
> -extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
> -				  unsigned long flags);
> +#define ioremap ioremap
> +#define ioremap_prot ioremap_prot
> +#define iounmap iounmap
>  static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
>  {
>  	return (void __iomem *)port;
> @@ -32,8 +33,6 @@ static inline void ioport_unmap(void __iomem *addr)
>  {
>  }
> 
> -extern void iounmap(const volatile void __iomem *addr);
> -
>  /*
>   * io{read,write}{16,32}be() macros
>   */
> diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
> index 712c2311daef..b07004d53267 100644
> --- a/arch/arc/mm/ioremap.c
> +++ b/arch/arc/mm/ioremap.c
> @@ -8,7 +8,6 @@
>  #include <linux/module.h>
>  #include <linux/io.h>
>  #include <linux/mm.h>
> -#include <linux/slab.h>
>  #include <linux/cache.h>
> 
>  static inline bool arc_uncached_addr_space(phys_addr_t paddr)
> @@ -25,13 +24,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
> 
>  void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
>  {
> -	phys_addr_t end;
> -
> -	/* Don't allow wraparound or zero size */
> -	end = paddr + size - 1;
> -	if (!size || (end < paddr))
> -		return NULL;
> -
>  	/*
>  	 * If the region is h/w uncached, MMU mapping can be elided as optim
>  	 * The cast to u32 is fine as this region can only be inside 4GB
> @@ -51,55 +43,22 @@ EXPORT_SYMBOL(ioremap);
>   * ARC hardware uncached region, this one still goes thru the MMU as caller
>   * might need finer access control (R/W/X)
>   */
> -void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
> +void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
>  			   unsigned long flags)
>  {
> -	unsigned int off;
> -	unsigned long vaddr;
> -	struct vm_struct *area;
> -	phys_addr_t end;
>  	pgprot_t prot = __pgprot(flags);
> 
> -	/* Don't allow wraparound, zero size */
> -	end = paddr + size - 1;
> -	if ((!size) || (end < paddr))
> -		return NULL;
> -
> -	/* An early platform driver might end up here */
> -	if (!slab_is_available())
> -		return NULL;
> -
>  	/* force uncached */
> -	prot = pgprot_noncached(prot);
> -
> -	/* Mappings have to be page-aligned */
> -	off = paddr & ~PAGE_MASK;
> -	paddr &= PAGE_MASK_PHYS;
> -	size = PAGE_ALIGN(end + 1) - paddr;
> -
> -	/*
> -	 * Ok, go for it..
> -	 */
> -	area = get_vm_area(size, VM_IOREMAP);
> -	if (!area)
> -		return NULL;
> -	area->phys_addr = paddr;
> -	vaddr = (unsigned long)area->addr;
> -	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
> -		vunmap((void __force *)vaddr);
> -		return NULL;
> -	}
> -	return (void __iomem *)(off + (char __iomem *)vaddr);
> +	return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
>  }
>  EXPORT_SYMBOL(ioremap_prot);
> 
> -
> -void iounmap(const volatile void __iomem *addr)
> +void iounmap(volatile void __iomem *addr)
>  {
>  	/* weird double cast to handle phys_addr_t > 32 bits */
>  	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
>  		return;
> 
> -	vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
> +	generic_iounmap(addr);
>  }
>  EXPORT_SYMBOL(iounmap);
> -- 
> 2.34.1
> 

-- 
Sincerely yours,
Mike.
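
For readers following the thread, the override convention the patch relies
on works roughly as follows. This is a simplified sketch of the
#ifndef-guard pattern used by include/asm-generic/io.h under
CONFIG_GENERIC_IOREMAP, not verbatim kernel code: the generic header only
emits its inline fallbacks when the arch has not already claimed the name,
so an arch that needs its own behavior defines the function out of line and
#defines the symbol to itself in its asm/io.h.

	/*
	 * Simplified sketch of the asm-generic/io.h convention
	 * (details trimmed; exact signatures may differ).
	 */

	/* Out-of-line helpers shared by all GENERIC_IOREMAP arches */
	void __iomem *generic_ioremap_prot(phys_addr_t paddr, size_t size,
					   pgprot_t prot);
	void generic_iounmap(volatile void __iomem *addr);

	/*
	 * Generic fallback: compiled in only if the arch's asm/io.h did
	 * not do "#define ioremap_prot ioremap_prot" beforehand.
	 */
	#ifndef ioremap_prot
	#define ioremap_prot ioremap_prot
	static inline void __iomem *ioremap_prot(phys_addr_t paddr,
						 size_t size,
						 unsigned long prot)
	{
		return generic_ioremap_prot(paddr, size, __pgprot(prot));
	}
	#endif

	#ifndef iounmap
	#define iounmap iounmap
	static inline void iounmap(volatile void __iomem *addr)
	{
		generic_iounmap(addr);
	}
	#endif

Because arch/arc/include/asm/io.h now #defines all three names, the generic
inlines drop out and arc's versions in arch/arc/mm/ioremap.c are used
instead. Those keep only the arc-specific parts (forcing uncached
protection, and short-circuiting addresses in the hardware-uncached region)
and delegate the vmalloc-area bookkeeping to generic_ioremap_prot() and
generic_iounmap(), which is where the 49 deleted lines went.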