From: John Harrison <John.C.Harrison@xxxxxxxxx>

Add reference counting of the request structure around __wait_seqno() calls.
This is a precursor to updating the wait code itself to take a request
rather than a seqno. At that point, it would be a Bad Idea for a request
object to be retired and freed while the wait code is still using it.

v3:
Note that even though struct_mutex is held during a call to
i915_wait_seqno(), it is still necessary to explicitly bump the request's
reference count. It appears that the shrinker can asynchronously retire
requests even while the mutex is held.
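
The following minimal userspace model illustrates the lifetime problem
(illustrative only; struct request, request_reference()/request_unreference()
and retire_thread() are invented names, not driver code). The waiter pins
the request with its own reference before sleeping, so the retire path,
standing in for the shrinker, can drop the list's reference at any time
without freeing the request out from under the wait:

/* Illustrative userspace model only - build with: cc -pthread model.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int ref;		/* starts at 1: the "request list" reference */
	pthread_mutex_t lock;	/* stands in for dev->struct_mutex */
	pthread_cond_t done;
	bool completed;
};

static void request_reference(struct request *req)
{
	atomic_fetch_add(&req->ref, 1);
}

static void request_unreference(struct request *req)
{
	/* Whichever thread drops the last reference frees the object. */
	if (atomic_fetch_sub(&req->ref, 1) == 1) {
		pthread_mutex_destroy(&req->lock);
		pthread_cond_destroy(&req->done);
		free(req);
	}
}

/* Retire path (the shrinker, in the driver): completes the request and
 * drops the list's reference, potentially freeing the request. */
static void *retire_thread(void *arg)
{
	struct request *req = arg;

	pthread_mutex_lock(&req->lock);
	req->completed = true;
	pthread_cond_broadcast(&req->done);
	pthread_mutex_unlock(&req->lock);
	request_unreference(req);
	return NULL;
}

int main(void)
{
	struct request *req = calloc(1, sizeof(*req));
	pthread_t retirer;

	atomic_init(&req->ref, 1);
	pthread_mutex_init(&req->lock, NULL);
	pthread_cond_init(&req->done, NULL);

	/* Waiter: pin the request before the retire path can run, and
	 * hold that reference across the entire wait. */
	request_reference(req);
	pthread_create(&retirer, NULL, retire_thread, req);

	pthread_mutex_lock(&req->lock);
	while (!req->completed)
		pthread_cond_wait(&req->done, &req->lock);
	pthread_mutex_unlock(&req->lock);
	request_unreference(req);	/* may be the final put */

	pthread_join(retirer, NULL);
	puts("request completed; freed exactly once");
	return 0;
}

Without the waiter's reference, the final put in retire_thread() could free
the request while the waiter was still blocked in pthread_cond_wait(); that
is the use-after-free the reference/unreference pairs below guard against.
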
For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
Reviewed-by: Thomas Daniel <Thomas.Daniel@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9ac9157..71303a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1333,8 +1333,11 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
-				 NULL, NULL);
+	i915_gem_request_reference(req);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+				NULL, NULL);
+	i915_gem_request_unreference(req);
+	return ret;
 }
 
 static int
@@ -1417,10 +1420,12 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
 				file_priv);
 	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
 	if (ret)
 		return ret;
 
@@ -2920,6 +2925,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_request *req;
 	struct intel_engine_cs *ring = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
@@ -2946,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (!obj->active || !obj->last_read_req)
 		goto out;
 
-	seqno = i915_gem_request_get_seqno(obj->last_read_req);
+	req = obj->last_read_req;
+	seqno = i915_gem_request_get_seqno(req);
 	WARN_ON(seqno == 0);
 	ring = obj->ring;
 
@@ -2960,10 +2967,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
 	drm_gem_object_unreference(&obj->base);
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
 
-	return __i915_wait_seqno(ring, seqno, reset_counter, true,
-				 &args->timeout_ns, file->driver_priv);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
+				file->driver_priv);
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(req);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -4122,6 +4134,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		target = request;
 	}
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	if (target)
+		i915_gem_request_reference(target);
 	spin_unlock(&file_priv->mm.lock);
 
 	if (target == NULL)
@@ -4133,6 +4147,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_request_unreference(target);
+	mutex_unlock(&dev->struct_mutex);
+
 	return ret;
 }
 
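
For context, not part of this diff: the i915_gem_request_reference()/
i915_gem_request_unreference() helpers introduced earlier in the series are
assumed to be a thin kref wrapper along the following lines (a sketch, not
a quotation of the actual header). If the final kref_put() may free the
request and the free path expects struct_mutex, that would explain why
i915_gem_ring_throttle() above takes the mutex purely to drop its reference:

/* Sketch only - assumes the request embeds a struct kref (req->ref) and
 * that i915_gem_request_free() must run under struct_mutex. */
static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
}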