Currently, barebox ARM arch_remap_range() will hang in an infinite loop when called with a size that's not aligned to a page boundary. Its Linux equivalent, ioremap(), will just round up to page size and work correctly. Adopt the Linux behavior to make porting code easier, e.g. when calling devm_ioremap(). The only other arch_remap_range() in barebox is PowerPC's. That one wouldn't loop indefinitely if the size isn't page aligned, so nothing to do there. Signed-off-by: Ahmad Fatoum <a.fatoum@xxxxxxxxxxxxxx> --- arch/arm/cpu/mmu_32.c | 3 +++ arch/arm/cpu/mmu_64.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c index d0ada5866f05..c6eecf9c9d2d 100644 --- a/arch/arm/cpu/mmu_32.c +++ b/arch/arm/cpu/mmu_32.c @@ -18,6 +18,7 @@ #include <memory.h> #include <asm/system_info.h> #include <asm/sections.h> +#include <linux/pagemap.h> #include "mmu_32.h" @@ -253,6 +254,8 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s pte_flags = get_pte_flags(map_type); pmd_flags = pte_flags_to_pmd(pte_flags); + size = PAGE_ALIGN(size); + while (size) { const bool pgdir_size_aligned = IS_ALIGNED(virt_addr, PGDIR_SIZE); u32 *pgd = (u32 *)&ttb[pgd_index(virt_addr)]; diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c index 12c4dc90b392..c6ea63e655ad 100644 --- a/arch/arm/cpu/mmu_64.c +++ b/arch/arm/cpu/mmu_64.c @@ -19,6 +19,7 @@ #include <asm/cache.h> #include <memory.h> #include <asm/system_info.h> +#include <linux/pagemap.h> #include "mmu_64.h" @@ -132,6 +133,8 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size, attr &= ~PTE_TYPE_MASK; + size = PAGE_ALIGN(size); + while (size) { table = ttb; for (level = 0; level < 4; level++) { -- 2.39.2