Re: [PATCH 5/6] drm/i915: Split idling from forcing context switch

On Wed, 2016-05-25 at 12:48 +0100, Chris Wilson wrote:
> We only need to force a switch to the kernel context placeholder during
> eviction. All other uses of i915_gpu_idle() just want to wait until
> existing work on the GPU is idle. Rename i915_gpu_idle() to
> i915_gem_wait_for_idle() to avoid any implications about "parking" the
> context first.
> 
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c      |  2 +-
>  drivers/gpu/drm/i915/i915_drv.h          |  2 +-
>  drivers/gpu/drm/i915/i915_gem.c          | 20 +++----------
>  drivers/gpu/drm/i915/i915_gem_evict.c    | 51 +++++++++++++++++++++++++++++---
>  drivers/gpu/drm/i915/i915_gem_gtt.c      |  2 +-
>  drivers/gpu/drm/i915/i915_gem_shrinker.c |  2 +-
>  6 files changed, 55 insertions(+), 24 deletions(-)
> 
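The split as I read it, just a sketch of the resulting calling
convention (names taken from this patch, not a literal diff):

        /* Eviction is the only caller that still needs the kernel
         * context loaded first; with execlists this is a no-op. */
        ret = switch_to_pinned_context(dev_priv);
        if (ret)
                return ret;

        /* Everyone else only waits for outstanding work to complete. */
        ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                return ret;

A couple of comments on the details below.
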
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index ac7e5692496d..615cef736356 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -4941,7 +4941,7 @@ i915_drop_caches_set(void *data, u64 val)
>  		return ret;
>  
>  	if (val & DROP_ACTIVE) {
> -		ret = i915_gpu_idle(dev);
> +		ret = i915_gem_wait_for_idle(dev_priv);
>  		if (ret)
>  			goto unlock;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index e4c8e341655c..d2719d96a274 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3283,7 +3283,7 @@ int i915_gem_init_engines(struct drm_device *dev);
>  int __must_check i915_gem_init_hw(struct drm_device *dev);
>  void i915_gem_init_swizzling(struct drm_device *dev);
>  void i915_gem_cleanup_engines(struct drm_device *dev);
> -int __must_check i915_gpu_idle(struct drm_device *dev);
> +int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
>  int __must_check i915_gem_suspend(struct drm_device *dev);
>  void __i915_add_request(struct drm_i915_gem_request *req,
>  			struct drm_i915_gem_object *batch_obj,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 61812a53e56d..e94758850c77 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -3420,29 +3420,17 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
>  	return __i915_vma_unbind(vma, false);
>  }
>  
> -int i915_gpu_idle(struct drm_device *dev)
> +int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
>  {
> -	struct drm_i915_private *dev_priv = dev->dev_private;
>  	struct intel_engine_cs *engine;
>  	int ret;
>  
> +	lockdep_assert_held(&dev_priv->dev->struct_mutex);
> +
>  	for_each_engine(engine, dev_priv) {
>  		if (engine->last_context == NULL)
>  			continue;
>  
> -		if (!i915.enable_execlists) {
> -			struct drm_i915_gem_request *req;
> -
> -			req = i915_gem_request_alloc(engine, NULL);
> -			if (IS_ERR(req))
> -				return PTR_ERR(req);
> -
> -			ret = i915_switch_context(req);
> -			i915_add_request_no_flush(req);
> -			if (ret)
> -				return ret;
> -		}
> -
>  		ret = intel_engine_idle(engine);
>  		if (ret)
>  			return ret;
> @@ -4703,7 +4691,7 @@ i915_gem_suspend(struct drm_device *dev)
>  	int ret = 0;
>  
>  	mutex_lock(&dev->struct_mutex);
> -	ret = i915_gpu_idle(dev);
> +	ret = i915_gem_wait_for_idle(dev_priv);
>  	if (ret)
>  		goto err;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index b144c3f5c650..5741b58d186c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -33,6 +33,37 @@
>  #include "intel_drv.h"
>  #include "i915_trace.h"
>  
> +static int switch_to_pinned_context(struct drm_i915_private *dev_priv)

Why not name it switch_to_kernel_context? That is effectively what it
does (minus the execlists behaviour, where it returns early).

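Just as a naming sketch, the body would stay exactly as below:

        /* Queue a request on each engine that switches it to
         * dev_priv->kernel_context, so no user context stays pinned. */
        static int switch_to_kernel_context(struct drm_i915_private *dev_priv);
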
> +{
> +	struct intel_engine_cs *engine;
> +
> +	if (i915.enable_execlists)
> +		return 0;
> +
> +	for_each_engine(engine, dev_priv) {
> +		struct drm_i915_gem_request *req;
> +		int ret;
> +
> +		if (engine->last_context == NULL)
> +			continue;
> +
> +		if (engine->last_context == dev_priv->kernel_context)
> +			continue;
> +
> +		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
> +		if (IS_ERR(req))
> +			return PTR_ERR(req);
> +
> +		ret = i915_switch_context(req);
> +		i915_add_request_no_flush(req);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +
>  static bool
>  mark_free(struct i915_vma *vma, struct list_head *unwind)
>  {
> @@ -150,11 +181,17 @@ none:
>  
>  	/* Only idle the GPU and repeat the search once */
>  	if (pass++ == 0) {
> -		ret = i915_gpu_idle(dev);
> +		struct drm_i915_private *dev_priv = to_i915(dev);
> +
> +		ret = switch_to_pinned_context(dev_priv);
>  		if (ret)
>  			return ret;
>  
> -		i915_gem_retire_requests(to_i915(dev));
> +		ret = i915_gem_wait_for_idle(dev_priv);
> +		if (ret)
> +			return ret;
> +
> +		i915_gem_retire_requests(dev_priv);
>  		goto search_again;
>  	}
>  
> @@ -261,11 +298,17 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
>  	trace_i915_gem_evict_vm(vm);
>  
>  	if (do_idle) {
> -		ret = i915_gpu_idle(vm->dev);
> +		struct drm_i915_private *dev_priv = to_i915(vm->dev);
> +
> +		ret = switch_to_pinned_context(dev_priv);
> +		if (ret)
> +			return ret;
> +
> +		ret = i915_gem_wait_for_idle(dev_priv);
>  		if (ret)
>  			return ret;
>  
> -		i915_gem_retire_requests(to_i915(vm->dev));
> +		i915_gem_retire_requests(dev_priv);
>  
>  		WARN_ON(!list_empty(&vm->active_list));
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 46684779d4d6..84f2ed78cf88 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2261,7 +2261,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
>  
>  	if (unlikely(ggtt->do_idle_maps)) {
>  		dev_priv->mm.interruptible = false;
> -		if (i915_gpu_idle(dev_priv->dev)) {
> +		if (i915_gem_wait_for_idle(dev_priv)) {
>  			DRM_ERROR("Couldn't idle GPU\n");

This wait-for-idle scheme and the "Couldn't idle GPU" error message do
not make sense together any more; after the rename the function only
waits for outstanding work, so the message should talk about the wait
failing rather than about idling the GPU.

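Perhaps just reword it to match, something like (sketch only, keeping
the existing fallback):

        if (i915_gem_wait_for_idle(dev_priv)) {
                DRM_ERROR("Failed to wait for idle\n");
                /* Wait a bit, in hopes it avoids the hang */
                udelay(10);
        }
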
>  			/* Wait a bit, in hopes it avoids the hang */
>  			udelay(10);
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 538c30499848..886a8797566d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -408,7 +408,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
>  		return NOTIFY_DONE;
>  
>  	/* Force everything onto the inactive lists */
> -	ret = i915_gpu_idle(dev_priv->dev);
> +	ret = i915_gem_wait_for_idle(dev_priv);
>  	if (ret)
>  		goto out;
>  
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation