Re: [PATCH v2 5/5] drm/i915: Start writeback from the shrinker

[Add Hugh]

On Fri 09-06-17 12:03:50, Chris Wilson wrote:
> When we are called to relieve mempressure via the shrinker, the only way
> we can make progress is either by discarding unwanted pages (those
> objects that userspace has marked MADV_DONTNEED) or by reclaiming the
> dirty objects via swap. As we know that is the only way to make further
> progress, we can initiate the writeback as we invalidate the objects.
> This means the objects we put onto the inactive anon lru list are
> already marked for reclaim+writeback and so will trigger a wait upon the
> writeback inside direct reclaim, greatly improving the success rate of
> direct reclaim on i915 objects.
> 
> The corollary is that we may start a slow swap on opportunistic
> mempressure from the likes of the compaction + migration kthreads. This
> is limited by those threads only being allowed to shrink idle pages, but
> also that if the page is reactivated by gpu activity before it is swapped
> out, we only pay the cost of repinning the page. The cost is most
> felt when an object is reused after mempressure, which hopefully
> excludes the latency sensitive tasks (as we are just extending the
> impact of swap thrashing to them).
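
As an aside, for readers less familiar with the i915 uapi side of this:
"objects that userspace has marked MADV_DONTNEED" above refers to the
DRM_IOCTL_I915_GEM_MADVISE ioctl. A minimal, illustrative-only sketch of
marking a GEM object purgeable might look like the following; gem_madvise()
is a hypothetical helper, and object creation plus error handling are
elided:

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/*
	 * Mark a GEM object DONTNEED so the shrinker may truncate its
	 * backing storage under memory pressure; marking it WILLNEED
	 * again reports via ->retained whether the pages survived.
	 */
	static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
	{
		struct drm_i915_gem_madvise arg = {
			.handle = handle,
			.madv = madv,	/* I915_MADV_DONTNEED / I915_MADV_WILLNEED */
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
			return -errno;

		return arg.retained;
	}

Only objects in that DONTNEED state are truncated outright by the shrinker
path in this patch; everything else is merely pushed towards swap via the
new __start_writeback().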

I am not sure you can start writeback on shmem pages while they are not in
the swapcache. Hugh?

> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> Cc: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
> Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h          |  2 +-
>  drivers/gpu/drm/i915/i915_gem.c          | 27 ++--------------
>  drivers/gpu/drm/i915/i915_gem_shrinker.c | 55 +++++++++++++++++++++++++++++++-
>  3 files changed, 57 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 602fb3324484..3ffe6504b391 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3332,7 +3332,7 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
>  
>  void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
>  				 enum i915_mm_subclass subclass);
> -void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
> +void __i915_gem_object_truncate(struct drm_i915_gem_object *obj);
>  
>  enum i915_map_type {
>  	I915_MAP_WB = 0,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 31cbe78171a9..1de94a8399a2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2176,8 +2176,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
>  }
>  
>  /* Immediately discard the backing storage */
> -static void
> -i915_gem_object_truncate(struct drm_i915_gem_object *obj)
> +void __i915_gem_object_truncate(struct drm_i915_gem_object *obj)
>  {
>  	i915_gem_object_free_mmap_offset(obj);
>  
> @@ -2194,28 +2193,6 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
>  	obj->mm.pages = ERR_PTR(-EFAULT);
>  }
>  
> -/* Try to discard unwanted pages */
> -void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
> -{
> -	struct address_space *mapping;
> -
> -	lockdep_assert_held(&obj->mm.lock);
> -	GEM_BUG_ON(obj->mm.pages);
> -
> -	switch (obj->mm.madv) {
> -	case I915_MADV_DONTNEED:
> -		i915_gem_object_truncate(obj);
> -	case __I915_MADV_PURGED:
> -		return;
> -	}
> -
> -	if (obj->base.filp == NULL)
> -		return;
> -
> -	mapping = obj->base.filp->f_mapping,
> -	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
> -}
> -
>  static void
>  i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
>  			      struct sg_table *pages)
> @@ -4212,7 +4189,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>  
>  	/* if the object is no longer attached, discard its backing storage */
>  	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
> -		i915_gem_object_truncate(obj);
> +		__i915_gem_object_truncate(obj);
>  
>  	args->retained = obj->mm.madv != __I915_MADV_PURGED;
>  	mutex_unlock(&obj->mm.lock);
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 1032f98add11..9f68304d6862 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -127,6 +127,59 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
>  	return !READ_ONCE(obj->mm.pages);
>  }
>  
> +static void __start_writeback(struct drm_i915_gem_object *obj)
> +{
> +	struct address_space *mapping;
> +	struct writeback_control wbc = {
> +		.sync_mode = WB_SYNC_NONE,
> +		.nr_to_write = SWAP_CLUSTER_MAX,
> +		.range_start = 0,
> +		.range_end = LLONG_MAX,
> +		.for_reclaim = 1,
> +	};
> +	unsigned long i;
> +
> +	lockdep_assert_held(&obj->mm.lock);
> +	GEM_BUG_ON(obj->mm.pages);
> +
> +	switch (obj->mm.madv) {
> +	case I915_MADV_DONTNEED:
> +		__i915_gem_object_truncate(obj);
> +	case __I915_MADV_PURGED:
> +		return;
> +	}
> +
> +	if (!obj->base.filp)
> +		return;
> +
> +	/* Force any other users of this object to refault */
> +	mapping = obj->base.filp->f_mapping;
> +	unmap_mapping_range(mapping, 0, (loff_t)-1, 0);
> +
> +	/* Begin writeback on each dirty page */
> +	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
> +		struct page *page;
> +
> +		page = find_lock_entry(mapping, i);
> +		if (!page || radix_tree_exceptional_entry(page))
> +			continue;
> +
> +		if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
> +			int ret;
> +
> +			SetPageReclaim(page);
> +			ret = mapping->a_ops->writepage(page, &wbc);
> +			if (!PageWriteback(page))
> +				ClearPageReclaim(page);
> +			if (!ret)
> +				goto put;
> +		}
> +		unlock_page(page);
> +put:
> +		put_page(page);
> +	}
> +}
> +
>  /**
>   * i915_gem_shrink - Shrink buffer object caches
>   * @dev_priv: i915 device
> @@ -239,7 +292,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>  				mutex_lock_nested(&obj->mm.lock,
>  						  I915_MM_SHRINKER);
>  				if (!obj->mm.pages) {
> -					__i915_gem_object_invalidate(obj);
> +					__start_writeback(obj);
>  					list_del_init(&obj->global_link);
>  					count += obj->base.size >> PAGE_SHIFT;
>  				}
> -- 
> 2.11.0
> 

-- 
Michal Hocko
SUSE Labs
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



