Re: [PATCH v3 08/12] drm/i915/ttm: add tt shmem backend


On Wed, 2021-09-15 at 19:59 +0100, Matthew Auld wrote:
> For cached objects we can allocate our pages directly in shmem. This
> should make it possible (in a later patch) to utilise the existing
> i915-gem shrinker code for such objects. For now this is still
> disabled.
> 
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
> Cc: Christian König <christian.koenig@xxxxxxx>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_object.h |   8 +
>  drivers/gpu/drm/i915/gem/i915_gem_shmem.c  |  14 +-
>  drivers/gpu/drm/i915/gem/i915_gem_ttm.c    | 217 ++++++++++++++++++---
>  3 files changed, 209 insertions(+), 30 deletions(-)
> 
> 

...

>  }
> @@ -223,6 +349,10 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
>  {
>         struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
>  
> +       if (place->mem_type == TTM_PL_SYSTEM &&
> +           bo->ttm && bo->ttm->page_flags & I915_TTM_TT_SHMEM)
> +               return false;
> +


Should we use ttm_bo_pin() / ttm_bo_unpin() to remove the bo from the
TTM LRU lists while it's shmem-populated, and change the check above
into a GEM_BUG_ON()?
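
Something like the below is roughly what I have in mind (untested
sketch; it assumes we ttm_bo_pin() the bo once its ttm_tt is
shmem-populated and ttm_bo_unpin() it again when those pages are
released, so it never sits on the LRU lists in that state):

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/*
	 * A shmem-populated ttm_tt is pinned off the TTM LRU lists, so
	 * TTM should never even ask whether evicting it is worthwhile.
	 */
	GEM_BUG_ON(place->mem_type == TTM_PL_SYSTEM &&
		   bo->ttm && bo->ttm->page_flags & I915_TTM_TT_SHMEM);

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}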


>         /* Will do for now. Our pinned objects are still on TTM's LRU lists */
>         return i915_gem_object_evictable(obj);
>  }
> @@ -316,9 +446,11 @@ static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
>         i915_gem_object_set_cache_coherency(obj, cache_level);
>  }
>  
> -static void i915_ttm_purge(struct drm_i915_gem_object *obj)
> +static void i915_ttm_writeback(struct drm_i915_gem_object *obj)
>  {
>         struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
> +       struct i915_ttm_tt *i915_tt =
> +               container_of(bo->ttm, typeof(*i915_tt), ttm);
>         struct ttm_operation_ctx ctx = {
>                 .interruptible = true,
>                 .no_wait_gpu = false,
> @@ -326,18 +458,52 @@ static void i915_ttm_purge(struct drm_i915_gem_object *obj)
>         struct ttm_placement place = {};
>         int ret;
>  
> -       if (obj->mm.madv == __I915_MADV_PURGED)
> +       if (!bo->ttm || !(bo->ttm->page_flags & I915_TTM_TT_SHMEM))
>                 return;
>  
> -       /* TTM's purge interface. Note that we might be reentering. */
> +       i915_tt->backup = true;
>         ret = ttm_bo_validate(bo, &place, &ctx);

It looks like writeback() becomes the backend's primary callback for
shrinking, whereas for the shmem backend the primary callback is
put_pages(), with writeback() only called when needed to accelerate
shmem's transfer of pages to the swap cache. Won't this break the
shrinker's reporting of shrunken pages?

Should we do the primary shrinking from the put_pages() callback
instead, like the shmem backend does? Although we'd then have to allow
NULL pages in put_pages() to account for objects already evicted to
shmem, and adjust the other backends if necessary.
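
I.e. something along these lines, driven from the put_pages() path so
that the pages the shrinker accounts for are actually released there,
with writeback() left to only do the __shmem_writeback() part. Rough,
untested sketch; i915_ttm_shrink() is a made-up name, and the body is
essentially your i915_ttm_writeback() minus the __shmem_writeback()
call:

static int i915_ttm_shrink(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (!bo->ttm || !(bo->ttm->page_flags & I915_TTM_TT_SHMEM))
		return 0;

	/*
	 * Validating to an empty placement unpopulates the ttm_tt while
	 * the backing pages stay in the shmem filp, so from the
	 * shrinker's point of view the object's pages really are
	 * released at this point.
	 */
	i915_tt->backup = true;
	ret = ttm_bo_validate(bo, &place, &ctx);
	i915_tt->backup = false;

	return ret;
}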



> -       if (!ret) {
> -               obj->write_domain = 0;
> -               obj->read_domains = 0;
> -               i915_ttm_adjust_gem_after_move(obj);
> -               i915_ttm_free_cached_io_st(obj);
> -               obj->mm.madv = __I915_MADV_PURGED;
> +       i915_tt->backup = false;
> +       if (ret)
> +               return;
> +
> +       __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);
> +}
> +




