Use ioremap() as the main implemented function, and define
ioremap_nocache() as a deprecated alias of ioremap() in preparation
for removing ioremap_nocache() entirely.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/x86/include/asm/io.h | 8 ++------
 arch/x86/mm/ioremap.c     | 8 ++++----
 arch/x86/mm/pageattr.c    | 4 ++--
 3 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 6bed97ff6db2..6b5cc41319a7 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -180,8 +180,6 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * The default ioremap() behavior is non-cached; if you need something
  * else, you probably want one of the following.
  */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-#define ioremap_nocache ioremap_nocache
 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 #define ioremap_uc ioremap_uc
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
@@ -205,11 +203,9 @@ extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
+void __iomem *ioremap(resource_size_t offset, unsigned long size);
 #define ioremap ioremap
+#define ioremap_nocache ioremap
 
 extern void iounmap(volatile void __iomem *addr);
 #define iounmap iounmap
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index a39dcdb5ae34..7985233dfb8d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,11 +280,11 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 }
 
 /**
- * ioremap_nocache - map bus memory into CPU space
+ * ioremap - map bus memory into CPU space
  * @phys_addr: bus address of the memory
  * @size: size of the resource to map
  *
- * ioremap_nocache performs a platform specific sequence of operations to
+ * ioremap performs a platform specific sequence of operations to
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
@@ -300,7 +300,7 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
 {
 	/*
 	 * Ideally, this should be:
@@ -315,7 +315,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 	return __ioremap_caller(phys_addr, size, pcm,
 				__builtin_return_address(0), false);
 }
-EXPORT_SYMBOL(ioremap_nocache);
+EXPORT_SYMBOL(ioremap);
 
 /**
  * ioremap_uc - map bus memory into CPU space as strongly uncachable
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0d09cc5aad61..1b99ad05b117 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1784,7 +1784,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
 int _set_memory_uc(unsigned long addr, int numpages)
 {
 	/*
-	 * for now UC MINUS. see comments in ioremap_nocache()
+	 * for now UC MINUS. see comments in ioremap()
 	 * If you really need strong UC use ioremap_uc(), but note
 	 * that you cannot override IO areas with set_memory_*() as
 	 * these helpers cannot work with IO memory.
@@ -1799,7 +1799,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	int ret;
 
 	/*
-	 * for now UC MINUS. see comments in ioremap_nocache()
+	 * for now UC MINUS. see comments in ioremap()
 	 */
 	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
-- 
2.20.1
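
For reference, here is a minimal sketch of what a caller looks like once
ioremap() is the primary entry point. The base address, size and register
offset below are hypothetical and purely illustrative, not taken from the
patch or any real device; the point is only that ioremap() now gives the
same uncached (UC-) mapping that ioremap_nocache() used to, and that the
mapping is still released with iounmap().

/*
 * Illustrative sketch only, not part of the patch: the base address,
 * size and register offset are made up.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_MMIO_BASE	0xfed00000UL	/* hypothetical bus address */
#define EXAMPLE_MMIO_SIZE	0x1000UL	/* hypothetical region size */
#define EXAMPLE_REG_CTRL	0x00		/* hypothetical register offset */

static void __iomem *example_regs;

static int example_map(void)
{
	/* previously: ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE) */
	example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;

	/* access the mapping through the usual MMIO accessors */
	writel(0x1, example_regs + EXAMPLE_REG_CTRL);
	return 0;
}

static void example_unmap(void)
{
	/* mappings still have to be freed with iounmap() */
	iounmap(example_regs);
}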