Re: [PATCH v4 1/2] drm: Improve drm_mm search (and fix topdown allocation) with rbtrees

On Thu, Feb 2, 2017 at 6:44 AM, Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> wrote:
> The drm_mm range manager claimed to support top-down insertion, but it
> was neither searching for the top-most hole that could fit the
> allocation request nor fitting the request to the hole correctly.
>
> In order to search the range efficiently, we create a secondary index
> for the holes using either their size or their address. This index
> allows us to find the smallest hole or the hole at the bottom or top of
> the range efficiently, whilst keeping the hole stack to rapidly service
> evictions.
>
> v2: Search for holes both high and low. Rename flags to mode.
> v3: Discover rb_entry_safe() and use it!
> v4: Kerneldoc for enum drm_mm_insert_mode.
>

I haven't had drm_mm.c paged into my head in a while, but the patch
appears to do the right thing.  The series is:
Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx>
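
For anyone skimming rather than applying the diff: the patch folds the
old DRM_MM_SEARCH_*/DRM_MM_CREATE_* flag pairs into a single
enum drm_mm_insert_mode. A minimal usage sketch (hypothetical mgr, fpfn
and lpfn, error handling elided):

	struct drm_mm_node node = {};	/* must be cleared to 0 first */
	int err;

	/* Place a 64-page allocation as high as possible in [fpfn, lpfn). */
	err = drm_mm_insert_node_in_range(&mgr->mm, &node,
					  64, 0, 0,	/* size, align, color */
					  fpfn, lpfn,
					  DRM_MM_INSERT_HIGH);
	if (err)	/* -ENOSPC when no hole fits */
		return err;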

> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Alex Deucher <alexander.deucher@xxxxxxx>
> Cc: "Christian König" <christian.koenig@xxxxxxx>
> Cc: David Airlie <airlied@xxxxxxxx>
> Cc: Russell King <rmk+kernel@xxxxxxxxxxxxxxx>
> Cc: Daniel Vetter <daniel.vetter@xxxxxxxxx>
> Cc: Jani Nikula <jani.nikula@xxxxxxxxxxxxxxx>
> Cc: Sean Paul <seanpaul@xxxxxxxxxxxx>
> Cc: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
> Cc: Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> Cc: Rob Clark <robdclark@xxxxxxxxx>
> Cc: Thierry Reding <thierry.reding@xxxxxxxxx>
> Cc: Stephen Warren <swarren@xxxxxxxxxxxxx>
> Cc: Alexandre Courbot <gnurou@xxxxxxxxx>
> Cc: Eric Anholt <eric@xxxxxxxxxx>
> Cc: Sinclair Yeh <syeh@xxxxxxxxxx>
> Cc: Thomas Hellstrom <thellstrom@xxxxxxxxxx>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c  |  16 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c |  20 +-
>  drivers/gpu/drm/armada/armada_gem.c          |   4 +-
>  drivers/gpu/drm/drm_mm.c                     | 488 +++++++++++++++------------
>  drivers/gpu/drm/drm_vma_manager.c            |   3 +-
>  drivers/gpu/drm/etnaviv/etnaviv_mmu.c        |  16 +-
>  drivers/gpu/drm/i915/i915_gem.c              |  10 +-
>  drivers/gpu/drm/i915/i915_gem_evict.c        |   9 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   5 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c          |  39 +--
>  drivers/gpu/drm/i915/i915_gem_stolen.c       |   6 +-
>  drivers/gpu/drm/msm/msm_gem.c                |   3 +-
>  drivers/gpu/drm/msm/msm_gem_vma.c            |   3 +-
>  drivers/gpu/drm/selftests/test-drm_mm.c      |  58 ++--
>  drivers/gpu/drm/sis/sis_mm.c                 |   6 +-
>  drivers/gpu/drm/tegra/gem.c                  |   4 +-
>  drivers/gpu/drm/ttm/ttm_bo_manager.c         |  18 +-
>  drivers/gpu/drm/vc4/vc4_crtc.c               |   2 +-
>  drivers/gpu/drm/vc4/vc4_hvs.c                |   3 +-
>  drivers/gpu/drm/vc4/vc4_plane.c              |   6 +-
>  drivers/gpu/drm/via/via_mm.c                 |   4 +-
>  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c       |  10 +-
>  include/drm/drm_mm.h                         | 184 +++++-----
>  23 files changed, 470 insertions(+), 447 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> index e4eb6dd3798a..0335c2f331e9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
> @@ -97,8 +97,7 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
>  {
>         struct amdgpu_gtt_mgr *mgr = man->priv;
>         struct drm_mm_node *node = mem->mm_node;
> -       enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
> -       enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> +       enum drm_mm_insert_mode mode;
>         unsigned long fpfn, lpfn;
>         int r;
>
> @@ -115,15 +114,14 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
>         else
>                 lpfn = man->size;
>
> -       if (place && place->flags & TTM_PL_FLAG_TOPDOWN) {
> -               sflags = DRM_MM_SEARCH_BELOW;
> -               aflags = DRM_MM_CREATE_TOP;
> -       }
> +       mode = DRM_MM_INSERT_BEST;
> +       if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
> +               mode = DRM_MM_INSERT_HIGH;
>
>         spin_lock(&mgr->lock);
> -       r = drm_mm_insert_node_in_range_generic(&mgr->mm, node, mem->num_pages,
> -                                               mem->page_alignment, 0,
> -                                               fpfn, lpfn, sflags, aflags);
> +       r = drm_mm_insert_node_in_range(&mgr->mm, node,
> +                                       mem->num_pages, mem->page_alignment, 0,
> +                                       fpfn, lpfn, mode);
>         spin_unlock(&mgr->lock);
>
>         if (!r) {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> index ac9007986c11..9e577e3d3147 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
> @@ -97,8 +97,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
>         struct amdgpu_vram_mgr *mgr = man->priv;
>         struct drm_mm *mm = &mgr->mm;
>         struct drm_mm_node *nodes;
> -       enum drm_mm_search_flags sflags = DRM_MM_SEARCH_DEFAULT;
> -       enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> +       enum drm_mm_insert_mode mode;
>         unsigned long lpfn, num_nodes, pages_per_node, pages_left;
>         unsigned i;
>         int r;
> @@ -121,10 +120,9 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
>         if (!nodes)
>                 return -ENOMEM;
>
> -       if (place->flags & TTM_PL_FLAG_TOPDOWN) {
> -               sflags = DRM_MM_SEARCH_BELOW;
> -               aflags = DRM_MM_CREATE_TOP;
> -       }
> +       mode = DRM_MM_INSERT_BEST;
> +       if (place->flags & TTM_PL_FLAG_TOPDOWN)
> +               mode = DRM_MM_INSERT_HIGH;
>
>         pages_left = mem->num_pages;
>
> @@ -135,13 +133,11 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
>
>                 if (pages == pages_per_node)
>                         alignment = pages_per_node;
> -               else
> -                       sflags |= DRM_MM_SEARCH_BEST;
>
> -               r = drm_mm_insert_node_in_range_generic(mm, &nodes[i], pages,
> -                                                       alignment, 0,
> -                                                       place->fpfn, lpfn,
> -                                                       sflags, aflags);
> +               r = drm_mm_insert_node_in_range(mm, &nodes[i],
> +                                               pages, alignment, 0,
> +                                               place->fpfn, lpfn,
> +                                               mode);
>                 if (unlikely(r))
>                         goto error;
>
> diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
> index a293c8be232c..560d416deab2 100644
> --- a/drivers/gpu/drm/armada/armada_gem.c
> +++ b/drivers/gpu/drm/armada/armada_gem.c
> @@ -148,8 +148,8 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
>                         return -ENOSPC;
>
>                 mutex_lock(&priv->linear_lock);
> -               ret = drm_mm_insert_node(&priv->linear, node, size, align,
> -                                        DRM_MM_SEARCH_DEFAULT);
> +               ret = drm_mm_insert_node_generic(&priv->linear, node,
> +                                                size, align, 0, 0);
>                 mutex_unlock(&priv->linear_lock);
>                 if (ret) {
>                         kfree(node);
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index e51876e588d6..8bfb0b327267 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -97,14 +97,6 @@
>   * locking would be fully redundant.
>   */
>
> -static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> -                                               u64 size,
> -                                               u64 alignment,
> -                                               unsigned long color,
> -                                               u64 start,
> -                                               u64 end,
> -                                               enum drm_mm_search_flags flags);
> -
>  #ifdef CONFIG_DRM_DEBUG_MM
>  #include <linux/stackdepot.h>
>
> @@ -226,69 +218,151 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
>                             &drm_mm_interval_tree_augment);
>  }
>
> -static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> -                                struct drm_mm_node *node,
> -                                u64 size, u64 alignment,
> -                                unsigned long color,
> -                                u64 range_start, u64 range_end,
> -                                enum drm_mm_allocator_flags flags)
> +#define RB_INSERT(root, member, expr) do { \
> +       struct rb_node **link = &root.rb_node, *rb = NULL; \
> +       u64 x = expr(node); \
> +       while (*link) { \
> +               rb = *link; \
> +               if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
> +                       link = &rb->rb_left; \
> +               else \
> +                       link = &rb->rb_right; \
> +       } \
> +       rb_link_node(&node->member, rb, link); \
> +       rb_insert_color(&node->member, &root); \
> +} while (0)
> +
> +#define HOLE_SIZE(NODE) ((NODE)->hole_size)
> +#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
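
Side note for reviewers: RB_INSERT is the stock linux/rbtree.h insertion
walk, parameterised on the key expression. Hand-expanded for the size
index it reduces to the sketch below, so nothing exotic hides in the macro:

	struct rb_node **link = &mm->holes_size.rb_node, *rb = NULL;
	u64 x = HOLE_SIZE(node);	/* key: node->hole_size */

	while (*link) {			/* descend to a free leaf slot */
		rb = *link;
		if (x < HOLE_SIZE(rb_entry(rb, struct drm_mm_node, rb_hole_size)))
			link = &rb->rb_left;
		else
			link = &rb->rb_right;
	}
	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color(&node->rb_hole_size, &mm->holes_size);	/* rebalance */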
> +
> +static void add_hole(struct drm_mm_node *node)
>  {
> -       struct drm_mm *mm = hole_node->mm;
> -       u64 hole_start = drm_mm_hole_node_start(hole_node);
> -       u64 hole_end = drm_mm_hole_node_end(hole_node);
> -       u64 adj_start = hole_start;
> -       u64 adj_end = hole_end;
> +       struct drm_mm *mm = node->mm;
>
> -       DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated);
> +       node->hole_size =
> +               __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
> +       DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
>
> -       if (mm->color_adjust)
> -               mm->color_adjust(hole_node, color, &adj_start, &adj_end);
> +       RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
> +       RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
>
> -       adj_start = max(adj_start, range_start);
> -       adj_end = min(adj_end, range_end);
> +       list_add(&node->hole_stack, &mm->hole_stack);
> +}
>
> -       if (flags & DRM_MM_CREATE_TOP)
> -               adj_start = adj_end - size;
> +static void rm_hole(struct drm_mm_node *node)
> +{
> +       DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
>
> -       if (alignment) {
> -               u64 rem;
> +       list_del(&node->hole_stack);
> +       rb_erase(&node->rb_hole_size, &node->mm->holes_size);
> +       rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
> +       node->hole_size = 0;
>
> -               div64_u64_rem(adj_start, alignment, &rem);
> -               if (rem) {
> -                       if (flags & DRM_MM_CREATE_TOP)
> -                               adj_start -= rem;
> -                       else
> -                               adj_start += alignment - rem;
> +       DRM_MM_BUG_ON(drm_mm_hole_follows(node));
> +}
> +
> +static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
> +{
> +       return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
> +}
> +
> +static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
> +{
> +       return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
> +}
> +
> +static inline u64 rb_hole_size(struct rb_node *rb)
> +{
> +       return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
> +}
> +
> +static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
> +{
> +       struct rb_node *best = NULL;
> +       struct rb_node **link = &mm->holes_size.rb_node;
> +
> +       while (*link) {
> +               struct rb_node *rb = *link;
> +
> +               if (size <= rb_hole_size(rb)) {
> +                       link = &rb->rb_left;
> +                       best = rb;
> +               } else {
> +                       link = &rb->rb_right;
>                 }
>         }
>
> -       if (adj_start == hole_start) {
> -               hole_node->hole_follows = 0;
> -               list_del(&hole_node->hole_stack);
> +       return rb_hole_size_to_node(best);
> +}
> +
> +static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
> +{
> +       struct drm_mm_node *node = NULL;
> +       struct rb_node **link = &mm->holes_addr.rb_node;
> +
> +       while (*link) {
> +               u64 hole_start;
> +
> +               node = rb_hole_addr_to_node(*link);
> +               hole_start = __drm_mm_hole_node_start(node);
> +
> +               if (addr < hole_start)
> +                       link = &node->rb_hole_addr.rb_left;
> +               else if (addr > hole_start + node->hole_size)
> +                       link = &node->rb_hole_addr.rb_right;
> +               else
> +                       break;
>         }
>
> -       node->start = adj_start;
> -       node->size = size;
> -       node->mm = mm;
> -       node->color = color;
> -       node->allocated = 1;
> +       return node;
> +}
>
> -       list_add(&node->node_list, &hole_node->node_list);
> +static struct drm_mm_node *
> +first_hole(struct drm_mm *mm,
> +          u64 start, u64 end, u64 size,
> +          enum drm_mm_insert_mode mode)
> +{
> +       if (RB_EMPTY_ROOT(&mm->holes_size))
> +               return NULL;
>
> -       drm_mm_interval_tree_add_node(hole_node, node);
> +       switch (mode) {
> +       default:
> +       case DRM_MM_INSERT_BEST:
> +               return best_hole(mm, size);
>
> -       DRM_MM_BUG_ON(node->start < range_start);
> -       DRM_MM_BUG_ON(node->start < adj_start);
> -       DRM_MM_BUG_ON(node->start + node->size > adj_end);
> -       DRM_MM_BUG_ON(node->start + node->size > range_end);
> +       case DRM_MM_INSERT_LOW:
> +               return find_hole(mm, start);
>
> -       node->hole_follows = 0;
> -       if (__drm_mm_hole_node_start(node) < hole_end) {
> -               list_add(&node->hole_stack, &mm->hole_stack);
> -               node->hole_follows = 1;
> +       case DRM_MM_INSERT_HIGH:
> +               return find_hole(mm, end);
> +
> +       case DRM_MM_INSERT_EVICT:
> +               return list_first_entry_or_null(&mm->hole_stack,
> +                                               struct drm_mm_node,
> +                                               hole_stack);
>         }
> +}
>
> -       save_stack(node);
> +static struct drm_mm_node *
> +next_hole(struct drm_mm *mm,
> +         struct drm_mm_node *node,
> +         enum drm_mm_insert_mode mode)
> +{
> +       switch (mode) {
> +       default:
> +       case DRM_MM_INSERT_BEST:
> +               return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
> +
> +       case DRM_MM_INSERT_LOW:
> +               return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
> +
> +       case DRM_MM_INSERT_HIGH:
> +               return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
> +
> +       case DRM_MM_INSERT_EVICT:
> +               node = list_next_entry(node, hole_stack);
> +               return &node->hole_stack == &mm->hole_stack ? NULL : node;
> +       }
>  }
>
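The mode-dependent walk order in first_hole()/next_hole() is the heart of
the fix; condensed, matching the code above:

	/*
	 * DRM_MM_INSERT_BEST : holes_size tree, smallest hole that fits
	 *                      first, then rb_next() towards larger holes
	 * DRM_MM_INSERT_LOW  : holes_addr tree, hole at the range start
	 *                      first, rb_next() towards higher addresses
	 * DRM_MM_INSERT_HIGH : holes_addr tree, hole at the range end
	 *                      first, rb_prev() towards lower addresses
	 * DRM_MM_INSERT_EVICT: hole_stack list, most recently created hole
	 *                      first, so freshly evicted space is reused
	 *                      immediately
	 */
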
>  /**
> @@ -317,21 +391,12 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
>                 return -ENOSPC;
>
>         /* Find the relevant hole to add our node to */
> -       hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
> -                                              node->start, ~(u64)0);
> -       if (hole) {
> -               if (hole->start < end)
> -                       return -ENOSPC;
> -       } else {
> -               hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list);
> -       }
> -
> -       hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
> -       if (!drm_mm_hole_follows(hole))
> +       hole = find_hole(mm, node->start);
> +       if (!hole)
>                 return -ENOSPC;
>
>         adj_start = hole_start = __drm_mm_hole_node_start(hole);
> -       adj_end = hole_end = __drm_mm_hole_node_end(hole);
> +       adj_end = hole_end = hole_start + hole->hole_size;
>
>         if (mm->color_adjust)
>                 mm->color_adjust(hole, node->color, &adj_start, &adj_end);
> @@ -340,70 +405,130 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
>                 return -ENOSPC;
>
>         node->mm = mm;
> -       node->allocated = 1;
>
>         list_add(&node->node_list, &hole->node_list);
> -
>         drm_mm_interval_tree_add_node(hole, node);
> +       node->allocated = true;
> +       node->hole_size = 0;
>
> -       if (node->start == hole_start) {
> -               hole->hole_follows = 0;
> -               list_del(&hole->hole_stack);
> -       }
> -
> -       node->hole_follows = 0;
> -       if (end != hole_end) {
> -               list_add(&node->hole_stack, &mm->hole_stack);
> -               node->hole_follows = 1;
> -       }
> +       rm_hole(hole);
> +       if (node->start > hole_start)
> +               add_hole(hole);
> +       if (end < hole_end)
> +               add_hole(node);
>
>         save_stack(node);
> -
>         return 0;
>  }
>  EXPORT_SYMBOL(drm_mm_reserve_node);
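
The rm_hole()/add_hole() pair replaces the old open-coded hole
bookkeeping; reserving a node inside a hole leaves at most two smaller
holes (sketch):

	/*
	 * hole_start        node->start       end            hole_end
	 *     |- add_hole(hole) -|==== node ====|- add_hole(node) -|
	 *
	 * rm_hole(hole) drops the original hole; the left remainder is
	 * re-indexed via the preceding node (hole), the right remainder
	 * via the freshly reserved node itself.
	 */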
>
>  /**
> - * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
> + * drm_mm_insert_node_in_range - ranged search for space and insert @node
>   * @mm: drm_mm to allocate from
>   * @node: preallocate node to insert
>   * @size: size of the allocation
>   * @alignment: alignment of the allocation
>   * @color: opaque tag value to use for this node
> - * @start: start of the allowed range for this node
> - * @end: end of the allowed range for this node
> - * @sflags: flags to fine-tune the allocation search
> - * @aflags: flags to fine-tune the allocation behavior
> + * @range_start: start of the allowed range for this node
> + * @range_end: end of the allowed range for this node
> + * @mode: fine-tune the allocation search and placement
>   *
>   * The preallocated @node must be cleared to 0.
>   *
>   * Returns:
>   * 0 on success, -ENOSPC if there's no suitable hole.
>   */
> -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
> -                                       u64 size, u64 alignment,
> -                                       unsigned long color,
> -                                       u64 start, u64 end,
> -                                       enum drm_mm_search_flags sflags,
> -                                       enum drm_mm_allocator_flags aflags)
> +int drm_mm_insert_node_in_range(struct drm_mm * const mm,
> +                               struct drm_mm_node * const node,
> +                               u64 size, u64 alignment,
> +                               unsigned long color,
> +                               u64 range_start, u64 range_end,
> +                               enum drm_mm_insert_mode mode)
>  {
> -       struct drm_mm_node *hole_node;
> +       struct drm_mm_node *hole;
> +       u64 remainder_mask;
>
> -       if (WARN_ON(size == 0))
> -               return -EINVAL;
> +       DRM_MM_BUG_ON(range_start >= range_end);
>
> -       hole_node = drm_mm_search_free_in_range_generic(mm,
> -                                                       size, alignment, color,
> -                                                       start, end, sflags);
> -       if (!hole_node)
> +       if (unlikely(size == 0 || range_end - range_start < size))
>                 return -ENOSPC;
>
> -       drm_mm_insert_helper(hole_node, node,
> -                            size, alignment, color,
> -                            start, end, aflags);
> -       return 0;
> +       if (alignment <= 1)
> +               alignment = 0;
> +
> +       remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
> +       for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
> +            hole = next_hole(mm, hole, mode)) {
> +               u64 hole_start = __drm_mm_hole_node_start(hole);
> +               u64 hole_end = hole_start + hole->hole_size;
> +               u64 adj_start, adj_end;
> +               u64 col_start, col_end;
> +
> +               if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
> +                       break;
> +
> +               if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
> +                       break;
> +
> +               col_start = hole_start;
> +               col_end = hole_end;
> +               if (mm->color_adjust)
> +                       mm->color_adjust(hole, color, &col_start, &col_end);
> +
> +               adj_start = max(col_start, range_start);
> +               adj_end = min(col_end, range_end);
> +
> +               if (adj_end <= adj_start || adj_end - adj_start < size)
> +                       continue;
> +
> +               if (mode == DRM_MM_INSERT_HIGH)
> +                       adj_start = adj_end - size;
> +
> +               if (alignment) {
> +                       u64 rem;
> +
> +                       if (likely(remainder_mask))
> +                               rem = adj_start & remainder_mask;
> +                       else
> +                               div64_u64_rem(adj_start, alignment, &rem);
> +                       if (rem) {
> +                               adj_start -= rem;
> +                               if (mode != DRM_MM_INSERT_HIGH)
> +                                       adj_start += alignment;
> +
> +                               if (adj_start < max(col_start, range_start) ||
> +                                   min(col_end, range_end) - adj_start < size)
> +                                       continue;
> +
> +                               if (adj_end <= adj_start ||
> +                                   adj_end - adj_start < size)
> +                                       continue;
> +                       }
> +               }
> +
> +               node->mm = mm;
> +               node->size = size;
> +               node->start = adj_start;
> +               node->color = color;
> +               node->hole_size = 0;
> +
> +               list_add(&node->node_list, &hole->node_list);
> +               drm_mm_interval_tree_add_node(hole, node);
> +               node->allocated = true;
> +
> +               rm_hole(hole);
> +               if (adj_start > hole_start)
> +                       add_hole(hole);
> +               if (adj_start + size < hole_end)
> +                       add_hole(node);
> +
> +               save_stack(node);
> +               return 0;
> +       }
> +
> +       return -ENOSPC;
>  }
> -EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
> +EXPORT_SYMBOL(drm_mm_insert_node_in_range);
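
remainder_mask is the usual power-of-two shortcut: for a power-of-two
alignment, addr % align equals addr & (align - 1), sparing the 64-bit
division in div64_u64_rem() on 32-bit hosts. Worked example:

	u64 alignment = 0x1000;		/* power of two */
	u64 mask = alignment - 1;	/* 0x0fff */
	u64 adj_start = 0x12345;
	u64 rem;

	div64_u64_rem(adj_start, alignment, &rem);	/* rem = 0x345 */
	rem = adj_start & mask;		/* rem = 0x345, no division needed */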
>
>  /**
>   * drm_mm_remove_node - Remove a memory node from the allocator.
> @@ -421,92 +546,20 @@ void drm_mm_remove_node(struct drm_mm_node *node)
>         DRM_MM_BUG_ON(!node->allocated);
>         DRM_MM_BUG_ON(node->scanned_block);
>
> -       prev_node =
> -           list_entry(node->node_list.prev, struct drm_mm_node, node_list);
> -
> -       if (drm_mm_hole_follows(node)) {
> -               DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) ==
> -                             __drm_mm_hole_node_end(node));
> -               list_del(&node->hole_stack);
> -       } else {
> -               DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) !=
> -                             __drm_mm_hole_node_end(node));
> -       }
> +       prev_node = list_prev_entry(node, node_list);
>
> -       if (!drm_mm_hole_follows(prev_node)) {
> -               prev_node->hole_follows = 1;
> -               list_add(&prev_node->hole_stack, &mm->hole_stack);
> -       } else
> -               list_move(&prev_node->hole_stack, &mm->hole_stack);
> +       if (drm_mm_hole_follows(node))
> +               rm_hole(node);
>
>         drm_mm_interval_tree_remove(node, &mm->interval_tree);
>         list_del(&node->node_list);
> -       node->allocated = 0;
> -}
> -EXPORT_SYMBOL(drm_mm_remove_node);
> -
> -static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment)
> -{
> -       if (end - start < size)
> -               return 0;
> -
> -       if (alignment) {
> -               u64 rem;
> -
> -               div64_u64_rem(start, alignment, &rem);
> -               if (rem)
> -                       start += alignment - rem;
> -       }
> +       node->allocated = false;
>
> -       return end >= start + size;
> -}
> -
> -static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> -                                                       u64 size,
> -                                                       u64 alignment,
> -                                                       unsigned long color,
> -                                                       u64 start,
> -                                                       u64 end,
> -                                                       enum drm_mm_search_flags flags)
> -{
> -       struct drm_mm_node *entry;
> -       struct drm_mm_node *best;
> -       u64 adj_start;
> -       u64 adj_end;
> -       u64 best_size;
> -
> -       DRM_MM_BUG_ON(mm->scan_active);
> -
> -       best = NULL;
> -       best_size = ~0UL;
> -
> -       __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
> -                              flags & DRM_MM_SEARCH_BELOW) {
> -               u64 hole_size = adj_end - adj_start;
> -
> -               if (mm->color_adjust) {
> -                       mm->color_adjust(entry, color, &adj_start, &adj_end);
> -                       if (adj_end <= adj_start)
> -                               continue;
> -               }
> -
> -               adj_start = max(adj_start, start);
> -               adj_end = min(adj_end, end);
> -
> -               if (!check_free_hole(adj_start, adj_end, size, alignment))
> -                       continue;
> -
> -               if (!(flags & DRM_MM_SEARCH_BEST))
> -                       return entry;
> -
> -               if (hole_size < best_size) {
> -                       best = entry;
> -                       best_size = hole_size;
> -               }
> -       }
> -
> -       return best;
> +       if (drm_mm_hole_follows(prev_node))
> +               rm_hole(prev_node);
> +       add_hole(prev_node);
>  }
> +EXPORT_SYMBOL(drm_mm_remove_node);
>
>  /**
>   * drm_mm_replace_node - move an allocation from @old to @new
> @@ -521,18 +574,23 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
>  {
>         DRM_MM_BUG_ON(!old->allocated);
>
> +       *new = *old;
> +
>         list_replace(&old->node_list, &new->node_list);
> -       list_replace(&old->hole_stack, &new->hole_stack);
>         rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
> -       new->hole_follows = old->hole_follows;
> -       new->mm = old->mm;
> -       new->start = old->start;
> -       new->size = old->size;
> -       new->color = old->color;
> -       new->__subtree_last = old->__subtree_last;
> -
> -       old->allocated = 0;
> -       new->allocated = 1;
> +
> +       if (drm_mm_hole_follows(old)) {
> +               list_replace(&old->hole_stack, &new->hole_stack);
> +               rb_replace_node(&old->rb_hole_size,
> +                               &new->rb_hole_size,
> +                               &old->mm->holes_size);
> +               rb_replace_node(&old->rb_hole_addr,
> +                               &new->rb_hole_addr,
> +                               &old->mm->holes_addr);
> +       }
> +
> +       old->allocated = false;
> +       new->allocated = true;
>  }
>  EXPORT_SYMBOL(drm_mm_replace_node);
>
> @@ -577,7 +635,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
>   * @color: opaque tag value to use for the allocation
>   * @start: start of the allowed range for the allocation
>   * @end: end of the allowed range for the allocation
> - * @flags: flags to specify how the allocation will be performed afterwards
> + * @mode: fine-tune the allocation search and placement
>   *
>   * This simply sets up the scanning routines with the parameters for the desired
>   * hole.
> @@ -593,7 +651,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
>                                  unsigned long color,
>                                  u64 start,
>                                  u64 end,
> -                                unsigned int flags)
> +                                enum drm_mm_insert_mode mode)
>  {
>         DRM_MM_BUG_ON(start >= end);
>         DRM_MM_BUG_ON(!size || size > end - start);
> @@ -608,7 +666,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
>         scan->alignment = alignment;
>         scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
>         scan->size = size;
> -       scan->flags = flags;
> +       scan->mode = mode;
>
>         DRM_MM_BUG_ON(end <= start);
>         scan->range_start = start;
> @@ -667,7 +725,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
>         if (adj_end <= adj_start || adj_end - adj_start < scan->size)
>                 return false;
>
> -       if (scan->flags == DRM_MM_CREATE_TOP)
> +       if (scan->mode == DRM_MM_INSERT_HIGH)
>                 adj_start = adj_end - scan->size;
>
>         if (scan->alignment) {
> @@ -679,7 +737,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
>                         div64_u64_rem(adj_start, scan->alignment, &rem);
>                 if (rem) {
>                         adj_start -= rem;
> -                       if (scan->flags != DRM_MM_CREATE_TOP)
> +                       if (scan->mode != DRM_MM_INSERT_HIGH)
>                                 adj_start += scan->alignment;
>                         if (adj_start < max(col_start, scan->range_start) ||
>                             min(col_end, scan->range_end) - adj_start < scan->size)
> @@ -775,7 +833,7 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
>
>         hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
>         hole_start = __drm_mm_hole_node_start(hole);
> -       hole_end = __drm_mm_hole_node_end(hole);
> +       hole_end = hole_start + hole->hole_size;
>
>         DRM_MM_BUG_ON(hole_start > scan->hit_start);
>         DRM_MM_BUG_ON(hole_end < scan->hit_end);
> @@ -802,21 +860,22 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
>  {
>         DRM_MM_BUG_ON(start + size <= start);
>
> +       mm->color_adjust = NULL;
> +
>         INIT_LIST_HEAD(&mm->hole_stack);
> -       mm->scan_active = 0;
> +       mm->interval_tree = RB_ROOT;
> +       mm->holes_size = RB_ROOT;
> +       mm->holes_addr = RB_ROOT;
>
>         /* Clever trick to avoid a special case in the free hole tracking. */
>         INIT_LIST_HEAD(&mm->head_node.node_list);
> -       mm->head_node.allocated = 0;
> -       mm->head_node.hole_follows = 1;
> +       mm->head_node.allocated = false;
>         mm->head_node.mm = mm;
>         mm->head_node.start = start + size;
> -       mm->head_node.size = start - mm->head_node.start;
> -       list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
> +       mm->head_node.size = -size;
> +       add_hole(&mm->head_node);
>
> -       mm->interval_tree = RB_ROOT;
> -
> -       mm->color_adjust = NULL;
> +       mm->scan_active = 0;
>  }
>  EXPORT_SYMBOL(drm_mm_init);
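
The "clever trick" comment is worth unpacking: head_node is a sentinel
whose start and size are chosen so that, under unsigned 64-bit
wraparound, the hole following it is exactly the managed range. Worked
through for a hypothetical drm_mm_init(mm, 0x1000, 0x9000):

	/*
	 * head_node.start = start + size = 0xa000
	 * head_node.size  = (u64)-size   = 0xffffffffffff7000
	 *
	 * hole start = head_node.start + head_node.size (mod 2^64)
	 *            = 0x1000 = start
	 * hole end   = start of the next node in the list, initially
	 *              head_node.start = 0xa000 = start + size
	 *
	 * so add_hole(&mm->head_node) indexes [start, start + size) as one
	 * free hole with no empty-list special case.
	 */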
>
> @@ -837,20 +896,17 @@ EXPORT_SYMBOL(drm_mm_takedown);
>
>  static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
>  {
> -       u64 hole_start, hole_end, hole_size;
> -
> -       if (entry->hole_follows) {
> -               hole_start = drm_mm_hole_node_start(entry);
> -               hole_end = drm_mm_hole_node_end(entry);
> -               hole_size = hole_end - hole_start;
> -               drm_printf(p, "%#018llx-%#018llx: %llu: free\n", hole_start,
> -                          hole_end, hole_size);
> -               return hole_size;
> +       u64 start, size;
> +
> +       size = entry->hole_size;
> +       if (size) {
> +               start = drm_mm_hole_node_start(entry);
> +               drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
> +                          start, start + size, size);
>         }
>
> -       return 0;
> +       return size;
>  }
> -
>  /**
>   * drm_mm_print - print allocator state
>   * @mm: drm_mm allocator to print
> diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
> index 20cc33d1bfc1..d9100b565198 100644
> --- a/drivers/gpu/drm/drm_vma_manager.c
> +++ b/drivers/gpu/drm/drm_vma_manager.c
> @@ -212,8 +212,7 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
>                 goto out_unlock;
>         }
>
> -       ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
> -                                pages, 0, DRM_MM_SEARCH_DEFAULT);
> +       ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages);
>         if (ret)
>                 goto out_unlock;
>
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> index f503af462dad..004456534e48 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
> @@ -107,6 +107,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>                                    struct drm_mm_node *node, size_t size)
>  {
>         struct etnaviv_vram_mapping *free = NULL;
> +       enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
>         int ret;
>
>         lockdep_assert_held(&mmu->lock);
> @@ -117,15 +118,10 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>                 struct list_head list;
>                 bool found;
>
> -               /*
> -                * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
> -                * drm_mm into giving out a low IOVA after address space
> -                * rollover. This needs a proper fix.
> -                */
>                 ret = drm_mm_insert_node_in_range(&mmu->mm, node,
> -                       size, 0, mmu->last_iova, ~0UL,
> -                       mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
> -
> +                                                 size, 0, 0,
> +                                                 mmu->last_iova, U64_MAX,
> +                                                 mode);
>                 if (ret != -ENOSPC)
>                         break;
>
> @@ -140,7 +136,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>                 }
>
>                 /* Try to retire some entries */
> -               drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
> +               drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);
>
>                 found = 0;
>                 INIT_LIST_HEAD(&list);
> @@ -192,6 +188,8 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
>                         list_del_init(&m->scan_node);
>                 }
>
> +               mode = DRM_MM_INSERT_EVICT;
> +
>                 /*
>                  * We removed enough mappings so that the new allocation will
>                  * succeed.  Ensure that the MMU will be flushed before the
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 838f9f978786..6c7a83bbd068 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -69,12 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
>                       struct drm_mm_node *node, u32 size)
>  {
>         memset(node, 0, sizeof(*node));
> -       return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
> -                                                  size, 0,
> -                                                  I915_COLOR_UNEVICTABLE,
> -                                                  0, ggtt->mappable_end,
> -                                                  DRM_MM_SEARCH_DEFAULT,
> -                                                  DRM_MM_CREATE_DEFAULT);
> +       return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
> +                                          size, 0, I915_COLOR_UNEVICTABLE,
> +                                          0, ggtt->mappable_end,
> +                                          DRM_MM_INSERT_LOW);
>  }
>
>  static void
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index a43e44e18042..c181b1bb3d2c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -109,6 +109,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
>         }, **phase;
>         struct i915_vma *vma, *next;
>         struct drm_mm_node *node;
> +       enum drm_mm_insert_mode mode;
>         int ret;
>
>         lockdep_assert_held(&vm->i915->drm.struct_mutex);
> @@ -127,10 +128,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
>          * On each list, the oldest objects lie at the HEAD with the freshest
>          * object on the TAIL.
>          */
> +       mode = DRM_MM_INSERT_BEST;
> +       if (flags & PIN_HIGH)
> +               mode = DRM_MM_INSERT_HIGH;
> +       if (flags & PIN_MAPPABLE)
> +               mode = DRM_MM_INSERT_LOW;
>         drm_mm_scan_init_with_range(&scan, &vm->mm,
>                                     min_size, alignment, cache_level,
> -                                   start, end,
> -                                   flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
> +                                   start, end, mode);
>
>         /* Retire before we search the active list. Although we have
>          * reasonable accuracy in our retirement lists, we may have
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 91c2393199a3..a40ade6d1c16 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -437,12 +437,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
>                                                PIN_MAPPABLE | PIN_NONBLOCK);
>                 if (IS_ERR(vma)) {
>                         memset(&cache->node, 0, sizeof(cache->node));
> -                       ret = drm_mm_insert_node_in_range_generic
> +                       ret = drm_mm_insert_node_in_range
>                                 (&ggtt->base.mm, &cache->node,
>                                  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
>                                  0, ggtt->mappable_end,
> -                                DRM_MM_SEARCH_DEFAULT,
> -                                DRM_MM_CREATE_DEFAULT);
> +                                DRM_MM_INSERT_LOW);
>                         if (ret) /* no inactive aperture space, use cpu reloc */
>                                 return NULL;
>                 } else {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 048040efc3f0..b7bcb1e62ce4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2748,12 +2748,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
>                 return ret;
>
>         /* Reserve a mappable slot for our lockless error capture */
> -       ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
> -                                                 &ggtt->error_capture,
> -                                                 PAGE_SIZE, 0,
> -                                                 I915_COLOR_UNEVICTABLE,
> -                                                 0, ggtt->mappable_end,
> -                                                 0, 0);
> +       ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
> +                                         PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
> +                                         0, ggtt->mappable_end,
> +                                         DRM_MM_INSERT_LOW);
>         if (ret)
>                 return ret;
>
> @@ -3663,7 +3661,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>                         u64 size, u64 alignment, unsigned long color,
>                         u64 start, u64 end, unsigned int flags)
>  {
> -       u32 search_flag, alloc_flag;
> +       enum drm_mm_insert_mode mode;
>         u64 offset;
>         int err;
>
> @@ -3684,13 +3682,11 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>         if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
>                 return -ENOSPC;
>
> -       if (flags & PIN_HIGH) {
> -               search_flag = DRM_MM_SEARCH_BELOW;
> -               alloc_flag = DRM_MM_CREATE_TOP;
> -       } else {
> -               search_flag = DRM_MM_SEARCH_DEFAULT;
> -               alloc_flag = DRM_MM_CREATE_DEFAULT;
> -       }
> +       mode = DRM_MM_INSERT_BEST;
> +       if (flags & PIN_HIGH)
> +               mode = DRM_MM_INSERT_HIGH;
> +       if (flags & PIN_MAPPABLE)
> +               mode = DRM_MM_INSERT_LOW;
>
>         /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
>          * so we know that we always have a minimum alignment of 4096.
> @@ -3702,10 +3698,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>         if (alignment <= I915_GTT_MIN_ALIGNMENT)
>                 alignment = 0;
>
> -       err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
> -                                                 size, alignment, color,
> -                                                 start, end,
> -                                                 search_flag, alloc_flag);
> +       err = drm_mm_insert_node_in_range(&vm->mm, node,
> +                                         size, alignment, color,
> +                                         start, end, mode);
>         if (err != -ENOSPC)
>                 return err;
>
> @@ -3743,9 +3738,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
>         if (err)
>                 return err;
>
> -       search_flag = DRM_MM_SEARCH_DEFAULT;
> -       return drm_mm_insert_node_in_range_generic(&vm->mm, node,
> -                                                  size, alignment, color,
> -                                                  start, end,
> -                                                  search_flag, alloc_flag);
> +       return drm_mm_insert_node_in_range(&vm->mm, node,
> +                                          size, alignment, color,
> +                                          start, end, DRM_MM_INSERT_EVICT);
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index cba12ee90cbf..82ca8f49fec1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -55,9 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
>                 return -ENODEV;
>
>         mutex_lock(&dev_priv->mm.stolen_lock);
> -       ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
> -                                         alignment, start, end,
> -                                         DRM_MM_SEARCH_DEFAULT);
> +       ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
> +                                         size, alignment, 0,
> +                                         start, end, DRM_MM_INSERT_BEST);
>         mutex_unlock(&dev_priv->mm.stolen_lock);
>
>         return ret;
> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> index 8098677a3916..c3b43f4d4f1f 100644
> --- a/drivers/gpu/drm/msm/msm_gem.c
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -54,8 +54,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
>         if (!p)
>                 return ERR_PTR(-ENOMEM);
>
> -       ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
> -                       npages, 0, DRM_MM_SEARCH_DEFAULT);
> +       ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
>         if (ret) {
>                 drm_free_large(p);
>                 return ERR_PTR(ret);
> diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
> index a311d26ccb21..b654eca7636a 100644
> --- a/drivers/gpu/drm/msm/msm_gem_vma.c
> +++ b/drivers/gpu/drm/msm/msm_gem_vma.c
> @@ -45,8 +45,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
>         if (WARN_ON(drm_mm_node_allocated(&vma->node)))
>                 return 0;
>
> -       ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
> -                       0, DRM_MM_SEARCH_DEFAULT);
> +       ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
>         if (ret)
>                 return ret;
>
> diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
> index 6df53e6c1308..bb5b7480e0b4 100644
> --- a/drivers/gpu/drm/selftests/test-drm_mm.c
> +++ b/drivers/gpu/drm/selftests/test-drm_mm.c
> @@ -22,23 +22,24 @@ static unsigned int max_iterations = 8192;
>  static unsigned int max_prime = 128;
>
>  enum {
> -       DEFAULT,
> -       TOPDOWN,
>         BEST,
> +       BOTTOMUP,
> +       TOPDOWN,
> +       EVICT,
>  };
>
>  static const struct insert_mode {
>         const char *name;
> -       unsigned int search_flags;
> -       unsigned int create_flags;
> +       enum drm_mm_insert_mode mode;
>  } insert_modes[] = {
> -       [DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
> -       [TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
> -       [BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
> +       [BEST] = { "best", DRM_MM_INSERT_BEST },
> +       [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
> +       [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
> +       [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
>         {}
>  }, evict_modes[] = {
> -       { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
> -       { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
> +       { "bottom-up", DRM_MM_INSERT_LOW },
> +       { "top-down", DRM_MM_INSERT_HIGH },
>         {}
>  };
>
> @@ -526,8 +527,7 @@ static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
>
>         err = drm_mm_insert_node_generic(mm, node,
>                                          size, alignment, color,
> -                                        mode->search_flags,
> -                                        mode->create_flags);
> +                                        mode->mode);
>         if (err) {
>                 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
>                        size, alignment, color, mode->name, err);
> @@ -547,7 +547,7 @@ static bool expect_insert_fail(struct drm_mm *mm, u64 size)
>         struct drm_mm_node tmp = {};
>         int err;
>
> -       err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
> +       err = drm_mm_insert_node(mm, &tmp, size);
>         if (likely(err == -ENOSPC))
>                 return true;
>
> @@ -753,11 +753,10 @@ static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
>  {
>         int err;
>
> -       err = drm_mm_insert_node_in_range_generic(mm, node,
> -                                                 size, alignment, color,
> -                                                 range_start, range_end,
> -                                                 mode->search_flags,
> -                                                 mode->create_flags);
> +       err = drm_mm_insert_node_in_range(mm, node,
> +                                         size, alignment, color,
> +                                         range_start, range_end,
> +                                         mode->mode);
>         if (err) {
>                 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
>                        size, alignment, color, mode->name,
> @@ -781,11 +780,10 @@ static bool expect_insert_in_range_fail(struct drm_mm *mm,
>         struct drm_mm_node tmp = {};
>         int err;
>
> -       err = drm_mm_insert_node_in_range_generic(mm, &tmp,
> -                                                 size, 0, 0,
> -                                                 range_start, range_end,
> -                                                 DRM_MM_SEARCH_DEFAULT,
> -                                                 DRM_MM_CREATE_DEFAULT);
> +       err = drm_mm_insert_node_in_range(mm, &tmp,
> +                                         size, 0, 0,
> +                                         range_start, range_end,
> +                                         0);
>         if (likely(err == -ENOSPC))
>                 return true;
>
> @@ -1324,7 +1322,7 @@ static int evict_something(struct drm_mm *mm,
>         drm_mm_scan_init_with_range(&scan, mm,
>                                     size, alignment, 0,
>                                     range_start, range_end,
> -                                   mode->create_flags);
> +                                   mode->mode);
>         if (!evict_nodes(&scan,
>                          nodes, order, count, false,
>                          &evict_list))
> @@ -1332,8 +1330,7 @@ static int evict_something(struct drm_mm *mm,
>
>         memset(&tmp, 0, sizeof(tmp));
>         err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
> -                                        mode->search_flags,
> -                                        mode->create_flags);
> +                                        DRM_MM_INSERT_EVICT);
>         if (err) {
>                 pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
>                        size, alignment);
> @@ -1408,8 +1405,7 @@ static int igt_evict(void *ignored)
>         ret = -EINVAL;
>         drm_mm_init(&mm, 0, size);
>         for (n = 0; n < size; n++) {
> -               err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
> -                                        DRM_MM_SEARCH_DEFAULT);
> +               err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
>                 if (err) {
>                         pr_err("insert failed, step %d\n", n);
>                         ret = err;
> @@ -1517,8 +1513,7 @@ static int igt_evict_range(void *ignored)
>         ret = -EINVAL;
>         drm_mm_init(&mm, 0, size);
>         for (n = 0; n < size; n++) {
> -               err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
> -                                        DRM_MM_SEARCH_DEFAULT);
> +               err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
>                 if (err) {
>                         pr_err("insert failed, step %d\n", n);
>                         ret = err;
> @@ -1904,7 +1899,7 @@ static int evict_color(struct drm_mm *mm,
>         drm_mm_scan_init_with_range(&scan, mm,
>                                     size, alignment, color,
>                                     range_start, range_end,
> -                                   mode->create_flags);
> +                                   mode->mode);
>         if (!evict_nodes(&scan,
>                          nodes, order, count, true,
>                          &evict_list))
> @@ -1912,8 +1907,7 @@ static int evict_color(struct drm_mm *mm,
>
>         memset(&tmp, 0, sizeof(tmp));
>         err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
> -                                        mode->search_flags,
> -                                        mode->create_flags);
> +                                        DRM_MM_INSERT_EVICT);
>         if (err) {
>                 pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
>                        size, alignment, color, err);
> diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
> index 03defda77766..1622db24cd39 100644
> --- a/drivers/gpu/drm/sis/sis_mm.c
> +++ b/drivers/gpu/drm/sis/sis_mm.c
> @@ -109,8 +109,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
>         if (pool == AGP_TYPE) {
>                 retval = drm_mm_insert_node(&dev_priv->agp_mm,
>                                             &item->mm_node,
> -                                           mem->size, 0,
> -                                           DRM_MM_SEARCH_DEFAULT);
> +                                           mem->size);
>                 offset = item->mm_node.start;
>         } else {
>  #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
> @@ -122,8 +121,7 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
>  #else
>                 retval = drm_mm_insert_node(&dev_priv->vram_mm,
>                                             &item->mm_node,
> -                                           mem->size, 0,
> -                                           DRM_MM_SEARCH_DEFAULT);
> +                                           mem->size);
>                 offset = item->mm_node.start;
>  #endif
>         }
> diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
> index 7d853e6b5ff0..b523a5d4a38c 100644
> --- a/drivers/gpu/drm/tegra/gem.c
> +++ b/drivers/gpu/drm/tegra/gem.c
> @@ -128,8 +128,8 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
>         if (!bo->mm)
>                 return -ENOMEM;
>
> -       err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
> -                                        PAGE_SIZE, 0, 0, 0);
> +       err = drm_mm_insert_node_generic(&tegra->mm,
> +                                        bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
>         if (err < 0) {
>                 dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
>                         err);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> index 988c48d1cf3e..90a6c0b03afc 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> @@ -54,9 +54,8 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
>  {
>         struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
>         struct drm_mm *mm = &rman->mm;
> -       struct drm_mm_node *node = NULL;
> -       enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
> -       enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
> +       struct drm_mm_node *node;
> +       enum drm_mm_insert_mode mode;
>         unsigned long lpfn;
>         int ret;
>
> @@ -68,16 +67,15 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
>         if (!node)
>                 return -ENOMEM;
>
> -       if (place->flags & TTM_PL_FLAG_TOPDOWN) {
> -               sflags = DRM_MM_SEARCH_BELOW;
> -               aflags = DRM_MM_CREATE_TOP;
> -       }
> +       mode = DRM_MM_INSERT_BEST;
> +       if (place->flags & TTM_PL_FLAG_TOPDOWN)
> +               mode = DRM_MM_INSERT_HIGH;
>
>         spin_lock(&rman->lock);
> -       ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
> +       ret = drm_mm_insert_node_in_range(mm, node,
> +                                         mem->num_pages,
>                                           mem->page_alignment, 0,
> -                                         place->fpfn, lpfn,
> -                                         sflags, aflags);
> +                                         place->fpfn, lpfn, mode);
>         spin_unlock(&rman->lock);
>
>         if (unlikely(ret)) {
> diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
> index 93aece203c54..0c06844af445 100644
> --- a/drivers/gpu/drm/vc4/vc4_crtc.c
> +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
> @@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
>
>         spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
>         ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
> -                                dlist_count, 1, 0);
> +                                dlist_count);
>         spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
>         if (ret)
>                 return ret;
> diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
> index fc68b1b4da52..f7f7677f6d8d 100644
> --- a/drivers/gpu/drm/vc4/vc4_hvs.c
> +++ b/drivers/gpu/drm/vc4/vc4_hvs.c
> @@ -141,8 +141,7 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
>         int ret, i;
>         u32 __iomem *dst_kernel;
>
> -       ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS, 1,
> -                                0);
> +       ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
>         if (ret) {
>                 DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
>                           ret);
> diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
> index 110d1518f5d5..c1f06897136b 100644
> --- a/drivers/gpu/drm/vc4/vc4_plane.c
> +++ b/drivers/gpu/drm/vc4/vc4_plane.c
> @@ -514,9 +514,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
>         if (lbm_size) {
>                 if (!vc4_state->lbm.allocated) {
>                         spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
> -                       ret = drm_mm_insert_node(&vc4->hvs->lbm_mm,
> -                                                &vc4_state->lbm,
> -                                                lbm_size, 32, 0);
> +                       ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
> +                                                        &vc4_state->lbm,
> +                                                        lbm_size, 32, 0, 0);
>                         spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
>                 } else {
>                         WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
> diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
> index a04ef1c992d9..4217d66a5cc6 100644
> --- a/drivers/gpu/drm/via/via_mm.c
> +++ b/drivers/gpu/drm/via/via_mm.c
> @@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
>         if (mem->type == VIA_MEM_AGP)
>                 retval = drm_mm_insert_node(&dev_priv->agp_mm,
>                                             &item->mm_node,
> -                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
> +                                           tmpSize);
>         else
>                 retval = drm_mm_insert_node(&dev_priv->vram_mm,
>                                             &item->mm_node,
> -                                           tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
> +                                           tmpSize);
>         if (retval)
>                 goto fail_alloc;
>
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> index aa04fb0159a7..77cb7c627e09 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
> @@ -673,16 +673,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
>
>         memset(info->node, 0, sizeof(*info->node));
>         spin_lock_bh(&man->lock);
> -       ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
> -                                        0, 0,
> -                                        DRM_MM_SEARCH_DEFAULT,
> -                                        DRM_MM_CREATE_DEFAULT);
> +       ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
>         if (ret) {
>                 vmw_cmdbuf_man_process(man);
> -               ret = drm_mm_insert_node_generic(&man->mm, info->node,
> -                                                info->page_size, 0, 0,
> -                                                DRM_MM_SEARCH_DEFAULT,
> -                                                DRM_MM_CREATE_DEFAULT);
> +               ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
>         }
>
>         spin_unlock_bh(&man->lock);
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index 3bddca8fd2b5..d81b0ba9921f 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -53,19 +53,62 @@
>  #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
>  #endif
>
> -enum drm_mm_search_flags {
> -       DRM_MM_SEARCH_DEFAULT =         0,
> -       DRM_MM_SEARCH_BEST =            1 << 0,
> -       DRM_MM_SEARCH_BELOW =           1 << 1,
> -};
> +/**
> + * enum drm_mm_insert_mode - control search and allocation behaviour
> + *
> + * The &struct drm_mm range manager supports finding a suitable hole using
> + * a number of search trees. These trees are organised by size, by address
> + * and in most recent eviction order. This allows the user to find either
> + * the smallest hole to reuse, the lowest or highest address to reuse, or
> + * simply reuse the most recent eviction that fits. When allocating the
> + * &drm_mm_node from within the hole, the &drm_mm_insert_mode also dictates
> + * whether to allocate the lowest matching address or the highest.
> + */
> +enum drm_mm_insert_mode {
> +       /**
> +        * @DRM_MM_INSERT_BEST:
> +        *
> +        * Search for the smallest hole (within the search range) that fits
> +        * the desired node.
> +        *
> +        * Allocates the node from the bottom of the found hole.
> +        */
> +       DRM_MM_INSERT_BEST = 0,
>
> -enum drm_mm_allocator_flags {
> -       DRM_MM_CREATE_DEFAULT =         0,
> -       DRM_MM_CREATE_TOP =             1 << 0,
> -};
> +       /**
> +        * @DRM_MM_INSERT_LOW:
> +        *
> +        * Search for the lowest hole (address closest to 0, within the search
> +        * range) that fits the desired node.
> +        *
> +        * Allocates the node from the bottom of the found hole.
> +        */
> +       DRM_MM_INSERT_LOW,
>
> -#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
> -#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
> +       /**
> +        * @DRM_MM_INSERT_HIGH:
> +        *
> +        * Search for the highest hole (address closest to U64_MAX, within the
> +        * search range) that fits the desired node.
> +        *
> +        * Allocates the node from the *top* of the found hole. The specified
> +        * alignment for the node is applied to the base of the node
> +        * (&drm_mm_node.start).
> +        */
> +       DRM_MM_INSERT_HIGH,
> +
> +       /**
> +        * @DRM_MM_INSERT_EVICT:
> +        *
> +        * Search for the most recently evicted hole (within the search range)
> +        * that fits the desired node. This is appropriate for use immediately
> +        * after performing an eviction scan (see drm_mm_scan_init()) and
> +        * removing the selected nodes to form a hole.
> +        *
> +        * Allocates the node from the bottom of the found hole.
> +        */
> +       DRM_MM_INSERT_EVICT,
> +};
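
Just to illustrate how the single mode enum reads from the driver side
after this change -- a minimal sketch, where "mgr", "size" and the
4 GiB range are made-up stand-ins rather than anything in this patch:

	struct drm_mm_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Pack the allocation against the top of a 4 GiB aperture. */
	err = drm_mm_insert_node_in_range(&mgr->mm, node,
					  size, PAGE_SIZE, 0,
					  0, BIT_ULL(32),
					  DRM_MM_INSERT_HIGH);
	if (err) {
		/* -ENOSPC: no hole in [0, 4G) large enough for size */
		kfree(node);
		return err;
	}

Note the alignment still applies to node->start even for INSERT_HIGH,
as the kerneldoc spells out above.
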
>
>  /**
>   * struct drm_mm_node - allocated block in the DRM allocator
> @@ -84,14 +127,16 @@ struct drm_mm_node {
>         /** @size: Size of the allocated block. */
>         u64 size;
>         /* private: */
> +       struct drm_mm *mm;
>         struct list_head node_list;
>         struct list_head hole_stack;
>         struct rb_node rb;
> -       unsigned hole_follows : 1;
> -       unsigned allocated : 1;
> -       bool scanned_block : 1;
> +       struct rb_node rb_hole_size;
> +       struct rb_node rb_hole_addr;
>         u64 __subtree_last;
> -       struct drm_mm *mm;
> +       u64 hole_size;
> +       bool allocated : 1;
> +       bool scanned_block : 1;
>  #ifdef CONFIG_DRM_DEBUG_MM
>         depot_stack_handle_t stack;
>  #endif
> @@ -127,6 +172,8 @@ struct drm_mm {
>         struct drm_mm_node head_node;
>         /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
>         struct rb_root interval_tree;
> +       struct rb_root holes_size;
> +       struct rb_root holes_addr;
>
>         unsigned long scan_active;
>  };
> @@ -155,7 +202,7 @@ struct drm_mm_scan {
>         u64 hit_end;
>
>         unsigned long color;
> -       unsigned int flags;
> +       enum drm_mm_insert_mode mode;
>  };
>
>  /**
> @@ -208,7 +255,7 @@ static inline bool drm_mm_initialized(const struct drm_mm *mm)
>   */
>  static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
>  {
> -       return node->hole_follows;
> +       return node->hole_size;
>  }
>
>  static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
> @@ -291,17 +338,9 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
>  #define drm_mm_for_each_node_safe(entry, next, mm) \
>         list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
>
> -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
> -       for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
> -            &entry->hole_stack != &(mm)->hole_stack ? \
> -            hole_start = drm_mm_hole_node_start(entry), \
> -            hole_end = drm_mm_hole_node_end(entry), \
> -            1 : 0; \
> -            entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
> -
>  /**
>   * drm_mm_for_each_hole - iterator to walk over all holes
> - * @entry: &drm_mm_node used internally to track progress
> + * @pos: &drm_mm_node used internally to track progress
>   * @mm: &drm_mm allocator to walk
>   * @hole_start: ulong variable to assign the hole start to on each iteration
>   * @hole_end: ulong variable to assign the hole end to on each iteration
> @@ -314,57 +353,28 @@ static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
>   * Implementation Note:
>   * We need to inline list_for_each_entry in order to be able to set hole_start
>   * and hole_end on each iteration while keeping the macro sane.
> - *
> - * The __drm_mm_for_each_hole version is similar, but with added support for
> - * going backwards.
>   */
> -#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
> -       __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
> +#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
> +       for (pos = list_first_entry(&(mm)->hole_stack, \
> +                                   typeof(*pos), hole_stack); \
> +            &pos->hole_stack != &(mm)->hole_stack ? \
> +            hole_start = drm_mm_hole_node_start(pos), \
> +            hole_end = hole_start + pos->hole_size, \
> +            1 : 0; \
> +            pos = list_next_entry(pos, hole_stack))
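
The open-coded iterator still reads naturally with the new hole_size
bookkeeping; e.g. a debug dump might look like this (assuming some
"struct drm_mm *mm" at hand, not anything from this patch):

	struct drm_mm_node *pos;
	u64 hole_start, hole_end;

	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
		pr_info("hole [%llx, %llx): %llu bytes\n",
			hole_start, hole_end, hole_end - hole_start);
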
>
>  /*
>   * Basic range manager support (drm_mm.c)
>   */
>  int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
> -int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
> -                                       struct drm_mm_node *node,
> -                                       u64 size,
> -                                       u64 alignment,
> -                                       unsigned long color,
> -                                       u64 start,
> -                                       u64 end,
> -                                       enum drm_mm_search_flags sflags,
> -                                       enum drm_mm_allocator_flags aflags);
> -
> -/**
> - * drm_mm_insert_node_in_range - ranged search for space and insert @node
> - * @mm: drm_mm to allocate from
> - * @node: preallocate node to insert
> - * @size: size of the allocation
> - * @alignment: alignment of the allocation
> - * @start: start of the allowed range for this node
> - * @end: end of the allowed range for this node
> - * @flags: flags to fine-tune the allocation
> - *
> - * This is a simplified version of drm_mm_insert_node_in_range_generic() with
> - * @color set to 0.
> - *
> - * The preallocated node must be cleared to 0.
> - *
> - * Returns:
> - * 0 on success, -ENOSPC if there's no suitable hole.
> - */
> -static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
> -                                             struct drm_mm_node *node,
> -                                             u64 size,
> -                                             u64 alignment,
> -                                             u64 start,
> -                                             u64 end,
> -                                             enum drm_mm_search_flags flags)
> -{
> -       return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
> -                                                  0, start, end, flags,
> -                                                  DRM_MM_CREATE_DEFAULT);
> -}
> +int drm_mm_insert_node_in_range(struct drm_mm *mm,
> +                               struct drm_mm_node *node,
> +                               u64 size,
> +                               u64 alignment,
> +                               unsigned long color,
> +                               u64 start,
> +                               u64 end,
> +                               enum drm_mm_insert_mode mode);
>
>  /**
>   * drm_mm_insert_node_generic - search for space and insert @node
> @@ -373,8 +383,7 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
>   * @size: size of the allocation
>   * @alignment: alignment of the allocation
>   * @color: opaque tag value to use for this node
> - * @sflags: flags to fine-tune the allocation search
> - * @aflags: flags to fine-tune the allocation behavior
> + * @mode: fine-tune the allocation search and placement
>   *
>   * This is a simplified version of drm_mm_insert_node_in_range() with no
>   * range restrictions applied.
> @@ -388,13 +397,11 @@ static inline int
>  drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
>                            u64 size, u64 alignment,
>                            unsigned long color,
> -                          enum drm_mm_search_flags sflags,
> -                          enum drm_mm_allocator_flags aflags)
> +                          enum drm_mm_insert_mode mode)
>  {
> -       return drm_mm_insert_node_in_range_generic(mm, node,
> -                                                  size, alignment, 0,
> -                                                  0, U64_MAX,
> -                                                  sflags, aflags);
> +       return drm_mm_insert_node_in_range(mm, node,
> +                                          size, alignment, color,
> +                                          0, U64_MAX, mode);
>  }
>
>  /**
> @@ -402,8 +409,6 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
>   * @mm: drm_mm to allocate from
>   * @node: preallocate node to insert
>   * @size: size of the allocation
> - * @alignment: alignment of the allocation
> - * @flags: flags to fine-tune the allocation
>   *
>   * This is a simplified version of drm_mm_insert_node_generic() with @color set
>   * to 0.
> @@ -415,13 +420,9 @@ drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
>   */
>  static inline int drm_mm_insert_node(struct drm_mm *mm,
>                                      struct drm_mm_node *node,
> -                                    u64 size,
> -                                    u64 alignment,
> -                                    enum drm_mm_search_flags flags)
> +                                    u64 size)
>  {
> -       return drm_mm_insert_node_generic(mm, node,
> -                                         size, alignment, 0,
> -                                         flags, DRM_MM_CREATE_DEFAULT);
> +       return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
>  }
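
Tiny nit, not blocking: now that the last argument is an enum rather
than empty flags, spelling it out might read better than the bare 0:

	return drm_mm_insert_node_generic(mm, node, size, 0, 0,
					  DRM_MM_INSERT_BEST);
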
>
>  void drm_mm_remove_node(struct drm_mm_node *node);
> @@ -468,7 +469,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
>                                  struct drm_mm *mm,
>                                  u64 size, u64 alignment, unsigned long color,
>                                  u64 start, u64 end,
> -                                unsigned int flags);
> +                                enum drm_mm_insert_mode mode);
>
>  /**
>   * drm_mm_scan_init - initialize lru scanning
> @@ -477,7 +478,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
>   * @size: size of the allocation
>   * @alignment: alignment of the allocation
>   * @color: opaque tag value to use for the allocation
> - * @flags: flags to specify how the allocation will be performed afterwards
> + * @mode: fine-tune the allocation search and placement
>   *
>   * This is a simplified version of drm_mm_scan_init_with_range() with no range
>   * restrictions applied.
> @@ -494,12 +495,11 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
>                                     u64 size,
>                                     u64 alignment,
>                                     unsigned long color,
> -                                   unsigned int flags)
> +                                   enum drm_mm_insert_mode mode)
>  {
>         drm_mm_scan_init_with_range(scan, mm,
>                                     size, alignment, color,
> -                                   0, U64_MAX,
> -                                   flags);
> +                                   0, U64_MAX, mode);
>  }
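
For reference, the eviction flow that DRM_MM_INSERT_EVICT pairs with,
per the enum kerneldoc above -- a rough sketch only, with "obj", "lru"
and the embedded "node" standing in for hypothetical driver structures:

	struct hypothetical_obj *obj, *tmp; /* driver object embedding a drm_mm_node */
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);

	/* Feed LRU candidates to the scan until a suitable hole forms. */
	list_for_each_entry(obj, lru, lru_link) {
		/* Prepend so that walking evict_list is reverse-add order. */
		list_add(&obj->evict_link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/* Every scanned block must be removed again; keep the ones the
	 * scan did not pick for the hole. */
	list_for_each_entry_safe(obj, tmp, &evict_list, evict_link)
		if (!drm_mm_scan_remove_block(&scan, &obj->node))
			list_del(&obj->evict_link);

	/* Unbind whatever remains on evict_list, then reuse the hole: */
	if (found)
		err = drm_mm_insert_node_generic(mm, new_node, size, 0, 0,
						 DRM_MM_INSERT_EVICT);
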
>
>  bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
> --
> 2.11.0
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx