+ mm-ioremap-rename-ioremap_page_range-to-ioremap_range.patch added to mm-unstable branch

The patch titled
     Subject: mm: ioremap: rename ioremap_page_range() to ioremap_range()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-ioremap-rename-ioremap_page_range-to-ioremap_range.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-ioremap-rename-ioremap_page_range-to-ioremap_range.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Baoquan He <bhe@xxxxxxxxxx>
Subject: mm: ioremap: rename ioremap_page_range() to ioremap_range()
Date: Mon, 6 Jun 2022 16:39:09 +0800

The current ioremap_page_range() only maps an IO address range to kernel
virtual addresses; no struct page pointer is passed in and no page
handling is involved.  So rename it to ioremap_range().
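
For context, the call sites updated below all follow the same shape:
reserve a VM_IOREMAP area in vmalloc space, then hand the virtual range
and the physical IO address to the helper.  A minimal illustrative sketch
(example_ioremap is a hypothetical name, not part of this patch) of how an
architecture-level ioremap() typically uses the renamed function:

#include <linux/io.h>		/* ioremap_range() */
#include <linux/vmalloc.h>	/* get_vm_area(), free_vm_area() */

void __iomem *example_ioremap(phys_addr_t phys_addr, unsigned long size,
			      pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long vaddr;

	/* Reserve a chunk of kernel virtual address space. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	area->phys_addr = phys_addr;
	vaddr = (unsigned long)area->addr;

	/* Map the physical IO range; no struct page is involved. */
	if (ioremap_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)vaddr;
}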

The renaming was done with the command below:
sed -i "s/ioremap_page_range/ioremap_range/g" `git grep -l ioremap_page_range`

Link: https://lkml.kernel.org/r/20220606083909.363350-6-bhe@xxxxxxxxxx
Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 Documentation/ia64/aliasing.rst  |    2 +-
 arch/alpha/kernel/proto.h        |    2 +-
 arch/arc/mm/ioremap.c            |    2 +-
 arch/arm/mm/ioremap.c            |    6 +++---
 arch/arm64/mm/ioremap.c          |    2 +-
 arch/hexagon/mm/ioremap.c        |    2 +-
 arch/ia64/mm/ioremap.c           |    2 +-
 arch/mips/loongson64/init.c      |    2 +-
 arch/mips/mm/ioremap.c           |    2 +-
 arch/openrisc/mm/ioremap.c       |    2 +-
 arch/parisc/mm/ioremap.c         |    2 +-
 arch/powerpc/kernel/isa-bridge.c |    2 +-
 arch/powerpc/kernel/pci_64.c     |    2 +-
 arch/powerpc/mm/ioremap.c        |    2 +-
 arch/s390/pci/pci.c              |    2 +-
 arch/sh/kernel/cpu/sh4/sq.c      |    2 +-
 arch/sh/mm/ioremap.c             |    2 +-
 arch/x86/mm/ioremap.c            |    2 +-
 arch/xtensa/mm/ioremap.c         |    2 +-
 drivers/pci/pci.c                |    2 +-
 include/linux/io.h               |    4 ++--
 mm/ioremap.c                     |    2 +-
 mm/vmalloc.c                     |    2 +-
 23 files changed, 26 insertions(+), 26 deletions(-)

--- a/arch/alpha/kernel/proto.h~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/alpha/kernel/proto.h
@@ -187,7 +187,7 @@ __alpha_remap_area_pages(unsigned long a
 
 	prot = __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE
 			| _PAGE_KWE | flags);
-	return ioremap_page_range(address, address + size, phys_addr, prot);
+	return ioremap_range(address, address + size, phys_addr, prot);
 }
 
 /* irq.c */
--- a/arch/arc/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/arc/mm/ioremap.c
@@ -85,7 +85,7 @@ void __iomem *ioremap_prot(phys_addr_t p
 		return NULL;
 	area->phys_addr = paddr;
 	vaddr = (unsigned long)area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
+	if (ioremap_range(vaddr, vaddr + size, paddr, prot)) {
 		vunmap((void __force *)vaddr);
 		return NULL;
 	}
--- a/arch/arm64/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/arm64/mm/ioremap.c
@@ -52,7 +52,7 @@ static void __iomem *__ioremap_caller(ph
 	addr = (unsigned long)area->addr;
 	area->phys_addr = phys_addr;
 
-	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
+	err = ioremap_range(addr, addr + size, phys_addr, prot);
 	if (err) {
 		vunmap((void *)addr);
 		return NULL;
--- a/arch/arm/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/arm/mm/ioremap.c
@@ -110,7 +110,7 @@ void __init add_static_vm_early(struct s
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
-	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+	return ioremap_range(virt, virt + PAGE_SIZE, phys,
 				  __pgprot(mtype->prot_pte));
 }
 EXPORT_SYMBOL(ioremap_page);
@@ -312,7 +312,7 @@ static void __iomem * __arm_ioremap_pfn_
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, paddr,
+		err = ioremap_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -473,7 +473,7 @@ int pci_remap_iospace(const struct resou
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL(pci_remap_iospace);
--- a/arch/hexagon/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/hexagon/mm/ioremap.c
@@ -30,7 +30,7 @@ void __iomem *ioremap(unsigned long phys
 	area = get_vm_area(size, VM_IOREMAP);
 	addr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
+	if (ioremap_range(addr, addr+size, phys_addr, prot)) {
 		vunmap((void *)addr);
 		return NULL;
 	}
--- a/arch/ia64/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/ia64/mm/ioremap.c
@@ -86,7 +86,7 @@ ioremap (unsigned long phys_addr, unsign
 
 		area->phys_addr = phys_addr;
 		addr = (void __iomem *) area->addr;
-		if (ioremap_page_range((unsigned long) addr,
+		if (ioremap_range((unsigned long) addr,
 				(unsigned long) addr + size, phys_addr, prot)) {
 			vunmap((void __force *) addr);
 			return NULL;
--- a/arch/mips/loongson64/init.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/mips/loongson64/init.c
@@ -162,7 +162,7 @@ static int __init add_legacy_isa_io(stru
 
 	vaddr = PCI_IOBASE + range->io_start;
 
-	ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+	ioremap_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
 
 	return 0;
 }
--- a/arch/mips/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/mips/mm/ioremap.c
@@ -101,7 +101,7 @@ void __iomem *ioremap_prot(phys_addr_t p
 	vaddr = (unsigned long)area->addr;
 
 	flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+	if (ioremap_range(vaddr, vaddr + size, phys_addr,
 			__pgprot(flags))) {
 		free_vm_area(area);
 		return NULL;
--- a/arch/openrisc/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/openrisc/mm/ioremap.c
@@ -64,7 +64,7 @@ void __iomem *__ref ioremap(phys_addr_t
 		fixmaps_used += (size >> PAGE_SHIFT);
 	}
 
-	if (ioremap_page_range(v, v + size, p,
+	if (ioremap_range(v, v + size, p,
 			__pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI))) {
 		if (likely(mem_init_done))
 			vfree(area->addr);
--- a/arch/parisc/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/parisc/mm/ioremap.c
@@ -80,7 +80,7 @@ void __iomem *ioremap(unsigned long phys
 		return NULL;
 
 	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+	if (ioremap_range((unsigned long)addr, (unsigned long)addr + size,
 			       phys_addr, pgprot)) {
 		vunmap(addr);
 		return NULL;
--- a/arch/powerpc/kernel/isa-bridge.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/powerpc/kernel/isa-bridge.c
@@ -46,7 +46,7 @@ static void remap_isa_base(phys_addr_t p
 	WARN_ON_ONCE(size & ~PAGE_MASK);
 
 	if (slab_is_available()) {
-		if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+		if (ioremap_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
 				pgprot_noncached(PAGE_KERNEL)))
 			vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
 	} else {
--- a/arch/powerpc/kernel/pci_64.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/powerpc/kernel/pci_64.c
@@ -138,7 +138,7 @@ void __iomem *ioremap_phb(phys_addr_t pa
 		return NULL;
 
 	addr = (unsigned long)area->addr;
-	if (ioremap_page_range(addr, addr + size, paddr,
+	if (ioremap_range(addr, addr + size, paddr,
 			pgprot_noncached(PAGE_KERNEL))) {
 		vunmap_range(addr, addr + size);
 		return NULL;
--- a/arch/powerpc/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/powerpc/mm/ioremap.c
@@ -89,7 +89,7 @@ void __iomem *do_ioremap(phys_addr_t pa,
 	area->phys_addr = pa;
 	va = (unsigned long)area->addr;
 
-	ret = ioremap_page_range(va, va + size, pa, prot);
+	ret = ioremap_range(va, va + size, pa, prot);
 	if (!ret)
 		return (void __iomem *)area->addr + offset;
 
--- a/arch/s390/pci/pci.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/s390/pci/pci.c
@@ -252,7 +252,7 @@ static void __iomem *__ioremap(phys_addr
 		return NULL;
 
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+	if (ioremap_range(vaddr, vaddr + size, addr, prot)) {
 		free_vm_area(area);
 		return NULL;
 	}
--- a/arch/sh/kernel/cpu/sh4/sq.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/sh/kernel/cpu/sh4/sq.c
@@ -110,7 +110,7 @@ static int __sq_remap(struct sq_mapping
 
 	vma->phys_addr = map->addr;
 
-	if (ioremap_page_range((unsigned long)vma->addr,
+	if (ioremap_range((unsigned long)vma->addr,
 			       (unsigned long)vma->addr + map->size,
 			       vma->phys_addr, prot)) {
 		vunmap(vma->addr);
--- a/arch/sh/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/sh/mm/ioremap.c
@@ -132,7 +132,7 @@ __ioremap_caller(phys_addr_t phys_addr,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+	if (ioremap_range(addr, addr + size, phys_addr, pgprot)) {
 		vunmap((void *)orig_addr);
 		return NULL;
 	}
--- a/arch/x86/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/x86/mm/ioremap.c
@@ -286,7 +286,7 @@ __ioremap_caller(resource_size_t phys_ad
 	if (memtype_kernel_map_sync(phys_addr, size, pcm))
 		goto err_free_area;
 
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
+	if (ioremap_range(vaddr, vaddr + size, phys_addr, prot))
 		goto err_free_area;
 
 	ret_addr = (void __iomem *) (vaddr + offset);
--- a/arch/xtensa/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/arch/xtensa/mm/ioremap.c
@@ -33,7 +33,7 @@ static void __iomem *xtensa_ioremap(unsi
 	vaddr = (unsigned long)area->addr;
 	area->phys_addr = paddr;
 
-	err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+	err = ioremap_range(vaddr, vaddr + size, paddr, prot);
 
 	if (err) {
 		vunmap((void *)vaddr);
--- a/Documentation/ia64/aliasing.rst~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/Documentation/ia64/aliasing.rst
@@ -165,7 +165,7 @@ ioremap()
 
 	If the granule contains non-WB memory, but we can cover the
 	region safely with kernel page table mappings, we can use
-	ioremap_page_range() as most other architectures do.
+	ioremap_range() as most other architectures do.
 
 	Failing all of the above, we have to fall back to a UC mapping.
 
--- a/drivers/pci/pci.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/drivers/pci/pci.c
@@ -4232,7 +4232,7 @@ int pci_remap_iospace(const struct resou
 	if (res->end > IO_SPACE_LIMIT)
 		return -EINVAL;
 
-	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+	return ioremap_range(vaddr, vaddr + resource_size(res), phys_addr,
 				  pgprot_device(PAGE_KERNEL));
 #else
 	/*
--- a/include/linux/io.h~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/include/linux/io.h
@@ -21,10 +21,10 @@ void __ioread32_copy(void *to, const voi
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 
 #ifdef CONFIG_MMU
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot);
 #else
-static inline int ioremap_page_range(unsigned long addr, unsigned long end,
+static inline int ioremap_range(unsigned long addr, unsigned long end,
 				     phys_addr_t phys_addr, pgprot_t prot)
 {
 	return 0;
--- a/mm/ioremap.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/mm/ioremap.c
@@ -33,7 +33,7 @@ void __iomem *ioremap_prot(phys_addr_t a
 		return NULL;
 	vaddr = (unsigned long)area->addr;
 
-	if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+	if (ioremap_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
 		free_vm_area(area);
 		return NULL;
 	}
--- a/mm/vmalloc.c~mm-ioremap-rename-ioremap_page_range-to-ioremap_range
+++ a/mm/vmalloc.c
@@ -312,7 +312,7 @@ static int vmap_range_noflush(unsigned l
 	return err;
 }
 
-int ioremap_page_range(unsigned long addr, unsigned long end,
+int ioremap_range(unsigned long addr, unsigned long end,
 		phys_addr_t phys_addr, pgprot_t prot)
 {
 	int err;
_

Patches currently in -mm which might be from bhe@xxxxxxxxxx are

mm-vmalloc-remove-the-unnecessary-type-check.patch
mm-vmalloc-remove-the-redundant-boundary-check.patch
mm-vmalloc-fix-typo-in-local-variable-name.patch
mm-vmalloc-add-code-comment-for-find_vmap_area_exceed_addr.patch
mm-ioremap-rename-ioremap_page_range-to-ioremap_range.patch



