Re: [PATCH 029/262] drm/i915: Track the purgeable objects on a separate eviction list

On 17 May 2018 at 07:03, Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> wrote:
> Currently the purgeable objects, I915_MADV_DONTNEED, are mixed in with
> the normal bound/unbound lists. Every shrinker pass starts with an
> attempt to purge from this set of unneeded objects, which entails us
> walking over both lists looking for any candidates. If there are none
> (and since we are shrinking, we can reasonably assume that the lists
> are full!), this becomes a very slow, futile walk.
>
> If we separate out the purgeable objects into their own list, this
> search becomes its own phase that is preferentially handled during
> shrinking. The cost instead becomes that we need to filter the
> purgeable list if we want to distinguish between bound and unbound
> objects.
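
As an aside for anyone following the thread: the purge_list is mostly fed
by userspace marking buffers purgeable via the madvise ioctl. A minimal,
untested sketch of that side (the helper name and the fd/handle parameters
are illustrative only), assuming fd is an open i915 DRM fd and handle a
valid GEM handle:

        #include <errno.h>
        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /*
         * Tell the kernel this buffer's contents are expendable, making it
         * a candidate for the shrinker's purge pass under memory pressure.
         */
        static int mark_purgeable(int fd, uint32_t handle)
        {
                struct drm_i915_gem_madvise madv = {
                        .handle = handle,
                        .madv = I915_MADV_DONTNEED,
                };

                if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
                        return -errno;

                /* retained == 0 means the backing store was already purged */
                return madv.retained;
        }

Setting I915_MADV_WILLNEED again before the next use takes the buffer back
off the purgeable list.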
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h          | 13 ++++---
>  drivers/gpu/drm/i915/i915_gem.c          | 49 ++++++++++++++++++------
>  drivers/gpu/drm/i915/i915_gem_shrinker.c | 28 +++++++-------
>  drivers/gpu/drm/i915/i915_vma.c          |  2 +-
>  4 files changed, 60 insertions(+), 32 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 56ffdd523893..1eeee043e164 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -926,6 +926,10 @@ struct i915_gem_mm {
>          * not actually have any pages attached.
>          */
>         struct list_head unbound_list;
> +       /**
> +        * List of objects which are purgeable. May be active.
> +        */
> +       struct list_head purge_list;
>
>         /** List of all objects in gtt_space, currently mmaped by userspace.
>          * All objects within this list must also be on bound_list.
> @@ -3310,11 +3314,10 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
>                               unsigned long target,
>                               unsigned long *nr_scanned,
>                               unsigned flags);
> -#define I915_SHRINK_PURGEABLE 0x1
> -#define I915_SHRINK_UNBOUND 0x2
> -#define I915_SHRINK_BOUND 0x4
> -#define I915_SHRINK_ACTIVE 0x8
> -#define I915_SHRINK_VMAPS 0x10
> +#define I915_SHRINK_UNBOUND BIT(0)
> +#define I915_SHRINK_BOUND BIT(1)
> +#define I915_SHRINK_ACTIVE BIT(2)
> +#define I915_SHRINK_VMAPS BIT(3)
>  unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
>  void i915_gem_shrinker_register(struct drm_i915_private *i915);
>  void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 42d95f6490f8..be3092a03722 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1672,8 +1672,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
>
>  static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
>  {
> -       struct drm_i915_private *i915;
> -       struct list_head *list;
>         struct i915_vma *vma;
>
>         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
> @@ -1688,11 +1686,16 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
>                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>         }
>
> -       i915 = to_i915(obj->base.dev);
> -       spin_lock(&i915->mm.obj_lock);
> -       list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
> -       list_move_tail(&obj->mm.link, list);
> -       spin_unlock(&i915->mm.obj_lock);
> +       if (obj->mm.madv == I915_MADV_WILLNEED) {
> +               struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +               struct list_head *list;
> +
> +               spin_lock(&i915->mm.obj_lock);
> +               list = obj->bind_count ?
> +                       &i915->mm.bound_list : &i915->mm.unbound_list;
> +               list_move_tail(&obj->mm.link, list);
> +               spin_unlock(&i915->mm.obj_lock);
> +       }
>  }
>
>  /**
> @@ -2531,7 +2534,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>         sg_page_sizes = 0;
>         for (i = 0; i < page_count; i++) {
>                 const unsigned int shrink[] = {
> -                       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
> +                       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
>                         0,
>                 }, *s = shrink;
>                 gfp_t gfp = noreclaim;
> @@ -4519,7 +4522,7 @@ int
>  i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>                        struct drm_file *file_priv)
>  {
> -       struct drm_i915_private *dev_priv = to_i915(dev);
> +       struct drm_i915_private *i915 = to_i915(dev);
>         struct drm_i915_gem_madvise *args = data;
>         struct drm_i915_gem_object *obj;
>         int err;
> @@ -4542,7 +4545,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>
>         if (i915_gem_object_has_pages(obj) &&
>             i915_gem_object_is_tiled(obj) &&
> -           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
> +           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
>                 if (obj->mm.madv == I915_MADV_WILLNEED) {
>                         GEM_BUG_ON(!obj->mm.quirked);
>                         __i915_gem_object_unpin_pages(obj);
> @@ -4558,6 +4561,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>         if (obj->mm.madv != __I915_MADV_PURGED)
>                 obj->mm.madv = args->madv;
>
> +       if (i915_gem_object_has_pages(obj)) {
> +               struct list_head *list;
> +
> +               spin_lock(&i915->mm.obj_lock);
> +               if (obj->mm.madv != I915_MADV_WILLNEED)
> +                       list = &i915->mm.purge_list;
> +               else if (obj->bind_count)
> +                       list = &i915->mm.bound_list;
> +               else
> +                       list = &i915->mm.unbound_list;
> +               list_move_tail(&obj->mm.link, list);
> +               spin_unlock(&i915->mm.obj_lock);
> +       }
> +
>         /* if the object is no longer attached, discard its backing storage */
>         if (obj->mm.madv == I915_MADV_DONTNEED &&
>             !i915_gem_object_has_pages(obj))
> @@ -4876,9 +4893,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>         if (obj->mm.quirked)
>                 __i915_gem_object_unpin_pages(obj);
>
> -       if (discard_backing_storage(obj))
> +       if (discard_backing_storage(obj)) {
> +               struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +
>                 obj->mm.madv = I915_MADV_DONTNEED;
>
> +               if (i915_gem_object_has_pages(obj)) {
> +                       spin_lock(&i915->mm.obj_lock);
> +                       list_move_tail(&obj->mm.link, &i915->mm.purge_list);
> +                       spin_unlock(&i915->mm.obj_lock);
> +               }
> +       }
> +
>         /*
>          * Before we free the object, make sure any pure RCU-only
>          * read-side critical sections are complete, e.g.
> @@ -5527,6 +5553,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
>
>         init_llist_head(&i915->mm.free_list);
>
> +       INIT_LIST_HEAD(&i915->mm.purge_list);
>         INIT_LIST_HEAD(&i915->mm.unbound_list);
>         INIT_LIST_HEAD(&i915->mm.bound_list);
>         INIT_LIST_HEAD(&i915->mm.fence_list);
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 5757fb7c4b5a..ea5a6e2d62d2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -151,6 +151,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                 struct list_head *list;
>                 unsigned int bit;
>         } phases[] = {
> +               { &i915->mm.purge_list, 0 },
>                 { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
>                 { &i915->mm.bound_list, I915_SHRINK_BOUND },
>                 { NULL, 0 },
> @@ -182,8 +183,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
>          * device just to recover a little memory. If absolutely necessary,
>          * we will force the wake during oom-notifier.
>          */
> -       if ((flags & I915_SHRINK_BOUND) &&
> -           !intel_runtime_pm_get_if_in_use(i915))
> +       if (flags & I915_SHRINK_BOUND && !intel_runtime_pm_get_if_in_use(i915))
>                 flags &= ~I915_SHRINK_BOUND;
>
>         /*
> @@ -209,7 +209,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                 struct list_head still_in_list;
>                 struct drm_i915_gem_object *obj;
>
> -               if ((flags & phase->bit) == 0)
> +               if (phase->bit && (flags & phase->bit) == 0)
>                         continue;
>
>                 INIT_LIST_HEAD(&still_in_list);
> @@ -228,10 +228,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                                                        mm.link))) {
>                         list_move_tail(&obj->mm.link, &still_in_list);
>
> -                       if (flags & I915_SHRINK_PURGEABLE &&
> -                           obj->mm.madv != I915_MADV_DONTNEED)
> -                               continue;
> -
>                         if (flags & I915_SHRINK_VMAPS &&
>                             !is_vmalloc_addr(obj->mm.mapping))
>                                 continue;
> @@ -241,6 +237,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                              i915_gem_object_is_framebuffer(obj)))
>                                 continue;
>
> +                       if (!(flags & I915_SHRINK_BOUND) &&
> +                           READ_ONCE(obj->bind_count))
> +                               continue;
> +
>                         if (!can_release_pages(obj))
>                                 continue;
>
> @@ -325,6 +325,11 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
>                         count += obj->base.size >> PAGE_SHIFT;
>                         num_objects++;
>                 }
> +       list_for_each_entry(obj, &i915->mm.purge_list, mm.link)
> +               if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
> +                       count += obj->base.size >> PAGE_SHIFT;
> +                       num_objects++;
> +               }
>         spin_unlock(&i915->mm.obj_lock);
>
>         /* Update our preferred vmscan batch size for the next pass.
> @@ -361,14 +366,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
>                                 sc->nr_to_scan,
>                                 &sc->nr_scanned,
>                                 I915_SHRINK_BOUND |
> -                               I915_SHRINK_UNBOUND |
> -                               I915_SHRINK_PURGEABLE);
> -       if (sc->nr_scanned < sc->nr_to_scan)
> -               freed += i915_gem_shrink(i915,
> -                                        sc->nr_to_scan - sc->nr_scanned,
> -                                        &sc->nr_scanned,
> -                                        I915_SHRINK_BOUND |
> -                                        I915_SHRINK_UNBOUND);
> +                               I915_SHRINK_UNBOUND);
>         if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
>                 intel_runtime_pm_get(i915);
>                 freed += i915_gem_shrink(i915,
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 9324d476e0a7..96039c4ef434 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -624,7 +624,7 @@ i915_vma_remove(struct i915_vma *vma)
>          * no more VMAs exist.
>          */
>         spin_lock(&i915->mm.obj_lock);
> -       if (--obj->bind_count == 0)
> +       if (--obj->bind_count == 0 && obj->mm.madv == I915_MADV_WILLNEED)

How does this play with volatile objects, like the gem internal objects,
since they are marked DONTNEED while pinned?
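
To spell the scenario out (rough sketch from memory, so treat the exact
code as an assumption): the internal backend marks its pages purgeable as
soon as they are allocated, while the object is still pinned and in use,
roughly

        /* contents are only valid while pinned; reap freely once unpinned */
        obj->mm.madv = I915_MADV_DONTNEED;

Those objects never go through the madvise ioctl, so as far as I can see
nothing moves them onto purge_list, and with the WILLNEED check above they
also no longer return to the unbound_list when their last vma is removed.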

>                 list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
>         spin_unlock(&i915->mm.obj_lock);
>
> --
> 2.17.0
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



