Add a pair of hooks (ioremap_page_range_hook/iounmap_page_range_hook) that can be implemented by an architecture. The default implementations are empty (__weak) stubs; an architecture overriding them gets notified each time an ioremap mapping is successfully created and each time a VM_IOREMAP area is unmapped. Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx> --- include/linux/io.h | 3 +++ mm/ioremap.c | 13 ++++++++++++- mm/vmalloc.c | 8 ++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/include/linux/io.h b/include/linux/io.h index 9595151d800d..0ffc265f114c 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -21,6 +21,9 @@ void __ioread32_copy(void *to, const void __iomem *from, size_t count); void __iowrite64_copy(void __iomem *to, const void *from, size_t count); #ifdef CONFIG_MMU +void ioremap_page_range_hook(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot); +void iounmap_page_range_hook(phys_addr_t phys_addr, size_t size); int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot); #else diff --git a/mm/ioremap.c b/mm/ioremap.c index 8ee0136f8cb0..bd77a86088f2 100644 --- a/mm/ioremap.c +++ b/mm/ioremap.c @@ -28,10 +28,21 @@ early_param("nohugeiomap", set_nohugeiomap); static const unsigned int iomap_max_page_shift = PAGE_SHIFT; #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ +void __weak ioremap_page_range_hook(unsigned long addr, unsigned long end, + phys_addr_t phys_addr, pgprot_t prot) +{ +} + int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { - return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift); + int ret; + + ret = vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift); + if (!ret) + ioremap_page_range_hook(addr, end, phys_addr, prot); + + return ret; } #ifdef CONFIG_GENERIC_IOREMAP diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d5cd52805149..af18a6141093 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -38,6 +38,7 @@ #include <linux/pgtable.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> +#include <linux/io.h> #include <asm/tlbflush.h> #include <asm/shmparam.h> @@ -2551,6 +2552,10 
@@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) set_area_direct_map(area, set_direct_map_default_noflush); } +void __weak iounmap_page_range_hook(phys_addr_t phys_addr, size_t size) +{ +} + static void __vunmap(const void *addr, int deallocate_pages) { struct vm_struct *area; @@ -2574,6 +2579,9 @@ static void __vunmap(const void *addr, int deallocate_pages) kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); + if (area->flags & VM_IOREMAP) + iounmap_page_range_hook(area->phys_addr, get_vm_area_size(area)); + vm_remove_mappings(area, deallocate_pages); if (deallocate_pages) { -- 2.30.2