From: John Harrison <John.C.Harrison@xxxxxxxxx> The scheduler decouples the submission of batch buffers to the driver from the submission of batch buffers to the hardware. Thus it is possible for an application to submit work, then close the DRM handle and free up all the resources that piece of work wishes to use before the work has even been submitted to the hardware. To prevent this, the scheduler needs to be informed of the DRM close event so that it can force through any outstanding work attributed to that file handle. Change-Id: I24ac056c062b075ff1cc5e2ed2d3fa8e17e85951 For: VIZ-1587 Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_dma.c | 3 ++ drivers/gpu/drm/i915/i915_scheduler.c | 66 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_scheduler.h | 2 ++ 3 files changed, 71 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 731cf31..c2f9c03 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -46,6 +46,7 @@ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <acpi/video.h> +#include "i915_scheduler.h" #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/oom.h> @@ -1250,6 +1251,8 @@ void i915_driver_lastclose(struct drm_device *dev) void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) { + i915_scheduler_closefile(dev, file); + mutex_lock(&dev->struct_mutex); i915_gem_context_close(dev, file); i915_gem_release(dev, file); diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 6386d1c..25e7ade 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -745,3 +745,69 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler, return 0; } + +int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file) +{ + struct i915_scheduler_queue_entry *node; + struct drm_i915_private 
*dev_priv = dev->dev_private; + struct i915_scheduler *scheduler = dev_priv->scheduler; + struct drm_i915_gem_request *req; + struct intel_engine_cs *ring; + int i, ret; + unsigned long flags; + bool found; + + if (!scheduler) + return 0; + + for_each_ring(ring, dev_priv, i) { + do { + spin_lock_irqsave(&scheduler->lock, flags); + + found = false; + list_for_each_entry(node, &scheduler->node_queue[ring->id], link) { + if (I915_SQS_IS_COMPLETE(node)) + continue; + + if (node->params.file != file) + continue; + + found = true; + req = node->params.request; + i915_gem_request_reference(req); + break; + } + + spin_unlock_irqrestore(&scheduler->lock, flags); + + if (found) { + do { + mutex_lock(&dev->struct_mutex); + ret = i915_wait_request(req); + mutex_unlock(&dev->struct_mutex); + if (ret == -EAGAIN) + msleep(20); + } while (ret == -EAGAIN); + + mutex_lock(&dev->struct_mutex); + i915_gem_request_unreference(req); + mutex_unlock(&dev->struct_mutex); + } + } while (found); + } + + spin_lock_irqsave(&scheduler->lock, flags); + for_each_ring(ring, dev_priv, i) { + list_for_each_entry(node, &scheduler->node_queue[ring->id], link) { + if (node->params.file != file) + continue; + + WARN_ON(!I915_SQS_IS_COMPLETE(node)); + + node->params.file = NULL; + } + } + spin_unlock_irqrestore(&scheduler->lock, flags); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 93faf40..9736b8d 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -85,6 +85,8 @@ enum { bool i915_scheduler_is_enabled(struct drm_device *dev); int i915_scheduler_init(struct drm_device *dev); +int i915_scheduler_closefile(struct drm_device *dev, + struct drm_file *file); int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe); bool i915_scheduler_notify_request(struct drm_i915_gem_request *req); -- 1.9.1 _______________________________________________ Intel-gfx mailing list 
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx