Update the sh arch_get_unmapped_area[_topdown] functions to make use of
vm_unmapped_area() instead of implementing a brute force search.

Signed-off-by: Michel Lespinasse <walken@xxxxxxxxxx>
---
 arch/sh/mm/mmap.c | 126 ++++++++++------------------------------------------
 1 files changed, 24 insertions(+), 102 deletions(-)

diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index afeb710ec5c3..acb3b8f71908 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -79,47 +80,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			return addr;
 	}
 
-	if (len > mm->cached_hole_size) {
-		start_addr = addr = mm->free_area_cache;
-	} else {
-		mm->cached_hole_size = 0;
-		start_addr = addr = TASK_UNMAPPED_BASE;
-	}
-
-full_search:
-	if (do_colour_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(mm->free_area_cache);
-
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point: (!vma || addr < vma->vm_end). */
-		if (unlikely(TASK_SIZE - len < addr)) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
-				goto full_search;
-			}
-			return -ENOMEM;
-		}
-		if (likely(!vma || addr + len <= vma->vm_start)) {
-			/*
-			 * Remember the place where we stopped the search:
-			 */
-			mm->free_area_cache = addr + len;
-			return addr;
-		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		addr = vma->vm_end;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN(addr, pgoff);
-	}
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -131,6 +98,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
+	struct vm_unmapped_area_info info;
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -162,73 +130,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			return addr;
 	}
 
-	/* check if free_area_cache is useful for us */
-	if (len <= mm->cached_hole_size) {
-		mm->cached_hole_size = 0;
-		mm->free_area_cache = mm->mmap_base;
-	}
-
-	/* either no address requested or can't fit in requested address hole */
-	addr = mm->free_area_cache;
-	if (do_colour_align) {
-		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
-
-		addr = base + len;
-	}
-
-	/* make sure it can fit in the remaining address space */
-	if (likely(addr > len)) {
-		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr-len);
-		}
-	}
-
-	if (unlikely(mm->mmap_base < len))
-		goto bottomup;
-
-	addr = mm->mmap_base-len;
-	if (do_colour_align)
-		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-
-	do {
-		/*
-		 * Lookup failure means no vma is above this address,
-		 * else if new region fits below vma->vm_start,
-		 * return with success:
-		 */
-		vma = find_vma(mm, addr);
-		if (likely(!vma || addr+len <= vma->vm_start)) {
-			/* remember the address as a hint for next time */
-			return (mm->free_area_cache = addr);
-		}
-
-		/* remember the largest hole we saw so far */
-		if (addr + mm->cached_hole_size < vma->vm_start)
-			mm->cached_hole_size = vma->vm_start - addr;
-
-		/* try just below the current vma->vm_start */
-		addr = vma->vm_start-len;
-		if (do_colour_align)
-			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
-	} while (likely(len < vma->vm_start));
-
-bottomup:
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	addr = vm_unmapped_area(&info);
+
 	/*
 	 * A failed mmap() very likely causes application failure,
 	 * so fall back to the bottom-up function here. This scenario
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
	 */
-	mm->cached_hole_size = ~0UL;
-	mm->free_area_cache = TASK_UNMAPPED_BASE;
-	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-	/*
-	 * Restore the topdown base:
-	 */
-	mm->free_area_cache = mm->mmap_base;
-	mm->cached_hole_size = ~0UL;
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = TASK_SIZE;
+		addr = vm_unmapped_area(&info);
+	}
 
 	return addr;
 }
--
1.7.7.3
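
For readers unfamiliar with the align_mask/align_offset pair: the sketch below is a userspace illustration of mine, not part of the patch, showing the colour-alignment arithmetic the patch is requesting. Within a candidate gap, a bottom-up search is expected to round up to the first address whose cache colour matches that of the file offset pgoff << PAGE_SHIFT. The shm_align_mask, pgoff and gap_start values are invented for the demo.

/*
 * Userspace illustration only -- the mask/offset values are made up,
 * and the rounding below merely mimics what a bottom-up
 * vm_unmapped_area() search is expected to do inside a free gap.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long shm_align_mask = 0x3fffUL;  /* example: 16K aliasing window */
	unsigned long align_mask = PAGE_MASK & shm_align_mask;
	unsigned long pgoff = 5;                  /* arbitrary file page offset */
	unsigned long align_offset = pgoff << PAGE_SHIFT;
	unsigned long gap_start = 0x29552000UL;   /* hypothetical free gap start */

	/* Round up within the gap to the first colour-matching address. */
	unsigned long addr = gap_start + ((align_offset - gap_start) & align_mask);

	/* Invariant: the chosen address shares a cache colour with the offset. */
	assert((addr & align_mask) == (align_offset & align_mask));
	assert(addr >= gap_start && addr - gap_start <= align_mask);

	printf("gap 0x%lx -> addr 0x%lx (colour 0x%lx)\n",
	       gap_start, addr, addr & shm_align_mask);
	return 0;
}

The top-down case presumably rounds downward from the end of the gap instead, but the invariant checked above stays the same, which is why both arch_get_unmapped_area variants in the patch can pass the identical align_mask/align_offset pair.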