Hi Catalin,

Today's linux-next merge of the arm-lpae tree got a conflict in
arch/arm/mm/ioremap.c between commit 3afb51f744b6 ("ARM: add generic
ioremap optimization by reusing static mappings") from the arm tree and
commit ec93d80c4b07 ("ARM: LPAE: Page table maintenance for the 3-level
format") from the arm-lpae tree.

I guessed (probably incorrectly) about the fix up (see below).  Please
supply a better fix if necessary.

More generally, is the LPAE stuff going to be merged soon?  The above
arm-lpae commit dates from February (though it was recommitted on
Nov 15) ...

--
Cheers,
Stephen Rothwell                    sfr@xxxxxxxxxxxxxxxx

diff --cc arch/arm/mm/ioremap.c
index 12c7ad2,d1f78ba..0000000
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@@ -194,7 -208,14 +202,8 @@@ void __iomem * __arm_ioremap_pfn_caller
           */
          if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                  return NULL;
+ #endif
  
 -        /*
 -         * Don't allow RAM to be mapped - this causes problems with ARMv6+
 -         */
 -        if (WARN_ON(pfn_valid(pfn)))
 -                return NULL;
 -
          type = get_mem_type(mtype);
          if (!type)
                  return NULL;
@@@ -329,34 -322,28 +338,34 @@@ __arm_ioremap_exec(unsigned long phys_a
  void __iounmap(volatile void __iomem *io_addr)
  {
          void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 -#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 -        struct vm_struct **p, *tmp;
 +        struct vm_struct *vm;
  
 -        /*
 -         * If this is a section based mapping we need to handle it
 -         * specially as the VM subsystem does not know how to handle
 -         * such a beast. We need the lock here b/c we need to clear
 -         * all the mappings before the area can be reclaimed
 -         * by someone else.
 -         */
 -        write_lock(&vmlist_lock);
 -        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 -                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 -                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
 -                                unmap_area_sections((unsigned long)tmp->addr,
 -                                                    tmp->size);
 -                        }
 +        read_lock(&vmlist_lock);
 +        for (vm = vmlist; vm; vm = vm->next) {
 +                if (vm->addr > addr)
 +                        break;
 +                if (!(vm->flags & VM_IOREMAP))
 +                        continue;
 +                /* If this is a static mapping we must leave it alone */
 +                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
 +                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
 +                        read_unlock(&vmlist_lock);
 +                        return;
 +                }
- #ifndef CONFIG_SMP
++#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 +                /*
 +                 * If this is a section based mapping we need to handle it
 +                 * specially as the VM subsystem does not know how to handle
 +                 * such a beast.
 +                 */
 +                if ((vm->addr == addr) &&
 +                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
 +                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                          break;
                  }
 -        }
 -        write_unlock(&vmlist_lock);
  #endif
 +        }
 +        read_unlock(&vmlist_lock);
  
          vunmap(addr);
  }
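For readability, here is roughly what the resolved __iounmap() looks like
with the above fix up applied.  This is only a sketch reconstructed from
the combined diff (whitespace and surrounding file context approximated),
not necessarily the final code that will land upstream:

void __iounmap(volatile void __iomem *io_addr)
{
        void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
        struct vm_struct *vm;

        read_lock(&vmlist_lock);
        for (vm = vmlist; vm; vm = vm->next) {
                if (vm->addr > addr)
                        break;
                if (!(vm->flags & VM_IOREMAP))
                        continue;
                /* If this is a static mapping we must leave it alone */
                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
                        read_unlock(&vmlist_lock);
                        return;
                }
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
                /*
                 * If this is a section based mapping we need to handle it
                 * specially as the VM subsystem does not know how to handle
                 * such a beast.
                 */
                if ((vm->addr == addr) &&
                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                        break;
                }
#endif
        }
        read_unlock(&vmlist_lock);

        vunmap(addr);
}

The net effect of the guess is that the section-mapping teardown inside
the new lookup loop is compiled out when CONFIG_ARM_LPAE is enabled,
mirroring what the arm-lpae commit did to the old #ifndef CONFIG_SMP
block, while the static-mapping check introduced by the arm tree is kept
unchanged.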