Re: [PATCH 50/64] drm/i915: Prepare i915_gem_active for annotations

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 07/07/16 09:41, Chris Wilson wrote:
In the future, we will want to add annotations to the i915_gem_active
struct. The API is thus expanded to hide direct access to the contents
of i915_gem_active and mediated instead through a number of helpers.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
  drivers/gpu/drm/i915/i915_debugfs.c     |  13 ++--
  drivers/gpu/drm/i915/i915_gem.c         |  91 +++++++++++++----------
  drivers/gpu/drm/i915/i915_gem_dmabuf.c  |   4 +-
  drivers/gpu/drm/i915/i915_gem_fence.c   |  11 ++-
  drivers/gpu/drm/i915/i915_gem_request.h | 128 +++++++++++++++++++++++++++++++-
  drivers/gpu/drm/i915/i915_gem_tiling.c  |   2 +-
  drivers/gpu/drm/i915/i915_gem_userptr.c |   8 +-
  drivers/gpu/drm/i915/i915_gpu_error.c   |   9 ++-
  drivers/gpu/drm/i915/intel_display.c    |  15 ++--
  9 files changed, 208 insertions(+), 73 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dd832eace487..ae1c640fc1c8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -155,10 +155,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  		   obj->base.write_domain);
  	for_each_engine_id(engine, dev_priv, id)
  		seq_printf(m, "%x ",
-			   i915_gem_request_get_seqno(obj->last_read[id].request));
+			   i915_gem_active_get_seqno(&obj->last_read[id]));
  	seq_printf(m, "] %x %x%s%s%s",
-		   i915_gem_request_get_seqno(obj->last_write.request),
-		   i915_gem_request_get_seqno(obj->last_fence.request),
+		   i915_gem_active_get_seqno(&obj->last_write),
+		   i915_gem_active_get_seqno(&obj->last_fence),
  		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
  		   obj->dirty ? " dirty" : "",
  		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -195,8 +195,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
  		*t = '\0';
  		seq_printf(m, " (%s mappable)", s);
  	}
-	if (obj->last_write.request)
-		seq_printf(m, " (%s)", obj->last_write.request->engine->name);
+
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	if (engine)
+		seq_printf(m, " (%s)", engine->name);
+
  	if (obj->frontbuffer_bits)
  		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
  }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5f302faf86e7..9c371e84b1bb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1349,27 +1349,30 @@ int
  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  			       bool readonly)
  {
+	struct drm_i915_gem_request *request;
  	struct reservation_object *resv;
  	int ret, i;

  	if (readonly) {
-		if (obj->last_write.request) {
-			ret = i915_wait_request(obj->last_write.request);
+		request = i915_gem_active_peek(&obj->last_write);

Why not get_request, since you have get_engine? Or will a get_request with different semantics be added later?

+		if (request) {
+			ret = i915_wait_request(request);
  			if (ret)
  				return ret;

-			i = obj->last_write.request->engine->id;
-			if (obj->last_read[i].request == obj->last_write.request)
+			i = request->engine->id;
+			if (i915_gem_active_peek(&obj->last_read[i]) == request)
  				i915_gem_object_retire__read(obj, i);
  			else
  				i915_gem_object_retire__write(obj);
  		}
  	} else {
  		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			if (!obj->last_read[i].request)
+			request = i915_gem_active_peek(&obj->last_read[i]);
+			if (!request)
  				continue;

-			ret = i915_wait_request(obj->last_read[i].request);
+			ret = i915_wait_request(request);
  			if (ret)
  				return ret;

@@ -1397,9 +1400,9 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
  {
  	int ring = req->engine->id;

-	if (obj->last_read[ring].request == req)
+	if (i915_gem_active_peek(&obj->last_read[ring]) == req)
  		i915_gem_object_retire__read(obj, ring);
-	else if (obj->last_write.request == req)
+	else if (i915_gem_active_peek(&obj->last_write) == req)
  		i915_gem_object_retire__write(obj);

  	if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1428,20 +1431,20 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
  	if (readonly) {
  		struct drm_i915_gem_request *req;

-		req = obj->last_write.request;
+		req = i915_gem_active_peek(&obj->last_write);
  		if (req == NULL)
  			return 0;

-		requests[n++] = i915_gem_request_get(req);
+		requests[n++] = req;

It used to take a reference and now it doesn't.

  	} else {
  		for (i = 0; i < I915_NUM_ENGINES; i++) {
  			struct drm_i915_gem_request *req;

-			req = obj->last_read[i].request;
+			req = i915_gem_active_peek(&obj->last_read[i]);
  			if (req == NULL)
  				continue;

-			requests[n++] = i915_gem_request_get(req);
+			requests[n++] = req;
  		}
  	}

@@ -2383,25 +2386,27 @@ void i915_vma_move_to_active(struct i915_vma *vma,
  static void
  i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
  {
-	GEM_BUG_ON(!obj->last_write.request);
-	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write.request->engine)));
+	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_write));
+	GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));

-	i915_gem_request_assign(&obj->last_write.request, NULL);
+	i915_gem_active_set(&obj->last_write, NULL);

Aha!

  	intel_fb_obj_flush(obj, true, ORIGIN_CS);
  }

  static void
  i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
  {
+	struct intel_engine_cs *engine;
  	struct i915_vma *vma;

-	GEM_BUG_ON(!obj->last_read[ring].request);
+	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_read[ring]));
  	GEM_BUG_ON(!(obj->active & (1 << ring)));

  	list_del_init(&obj->engine_list[ring]);
-	i915_gem_request_assign(&obj->last_read[ring].request, NULL);
+	i915_gem_active_set(&obj->last_read[ring], NULL);

-	if (obj->last_write.request && obj->last_write.request->engine->id == ring)
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	if (engine && engine->id == ring)
  		i915_gem_object_retire__write(obj);

  	obj->active &= ~(1 << ring);
@@ -2420,7 +2425,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
  			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
  	}

-	i915_gem_request_assign(&obj->last_fence.request, NULL);
+	i915_gem_active_set(&obj->last_fence, NULL);
  	i915_gem_object_put(obj);
  }

@@ -2618,7 +2623,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
  				       struct drm_i915_gem_object,
  				       engine_list[engine->id]);

-		if (!list_empty(&obj->last_read[engine->id].request->list))
+		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
  			break;

  		i915_gem_object_retire__read(obj, engine->id);
@@ -2746,7 +2751,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  	for (i = 0; i < I915_NUM_ENGINES; i++) {
  		struct drm_i915_gem_request *req;

-		req = obj->last_read[i].request;
+		req = i915_gem_active_peek(&obj->last_read[i]);
  		if (req == NULL)
  			continue;

@@ -2786,7 +2791,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  {
  	struct drm_i915_gem_wait *args = data;
  	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
  	int i, n = 0;
  	int ret;

@@ -2822,20 +2827,21 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  	i915_gem_object_put(obj);

  	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (!obj->last_read[i].request)
-			continue;
+		struct drm_i915_gem_request *req;

-		req[n++] = i915_gem_request_get(obj->last_read[i].request);
+		req = i915_gem_active_get(&obj->last_read[i]);

Oh right, there is a get_request one, this time preserving the reference-taking behaviour.

+		if (req)
+			requests[n++] = req;
  	}

  	mutex_unlock(&dev->struct_mutex);

  	for (i = 0; i < n; i++) {
  		if (ret == 0)
-			ret = __i915_wait_request(req[i], true,
+			ret = __i915_wait_request(requests[i], true,
  						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
  						  to_rps_client(file));
-		i915_gem_request_put(req[i]);
+		i915_gem_request_put(requests[i]);
  	}
  	return ret;

@@ -2908,7 +2914,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
  		     struct drm_i915_gem_request *to)
  {
  	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
  	int ret, i, n;

  	if (!obj->active)
@@ -2916,15 +2922,22 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,

  	n = 0;
  	if (readonly) {
-		if (obj->last_write.request)
-			req[n++] = obj->last_write.request;
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_active_peek(&obj->last_write);
+		if (req)
+			requests[n++] = req;
  	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++)
-			if (obj->last_read[i].request)
-				req[n++] = obj->last_read[i].request;
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = i915_gem_active_peek(&obj->last_read[i]);
+			if (req)
+				requests[n++] = req;
+		}
  	}
  	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i]);
+		ret = __i915_gem_object_sync(obj, to, requests[i]);
  		if (ret)
  			return ret;
  	}
@@ -4017,17 +4030,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,

  	args->busy = 0;
  	if (obj->active) {
+		struct drm_i915_gem_request *req;
  		int i;

  		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = obj->last_read[i].request;
+			req = i915_gem_active_peek(&obj->last_read[i]);
  			if (req)
  				args->busy |= 1 << (16 + req->engine->exec_id);
  		}
-		if (obj->last_write.request)
-			args->busy |= obj->last_write.request->engine->exec_id;
+		req = i915_gem_active_peek(&obj->last_write);
+		if (req)
+			args->busy |= req->engine->exec_id;
  	}

  unref:
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index aa767ca28532..38000c59d456 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -235,7 +235,7 @@ static void export_fences(struct drm_i915_gem_object *obj,

  	active = obj->active;
  	for_each_active(active, idx) {
-		req = obj->last_read[idx].request;
+		req = i915_gem_active_peek(&obj->last_read[idx]);
  		if (!req)
  			continue;

@@ -243,7 +243,7 @@ static void export_fences(struct drm_i915_gem_object *obj,
  			reservation_object_add_shared_fence(resv, &req->fence);
  	}

-	req = obj->last_write.request;
+	req = i915_gem_active_peek(&obj->last_write);
  	if (req)
  		reservation_object_add_excl_fence(resv, &req->fence);

diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 9838046801bd..9fdbd66128a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -261,14 +261,13 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
  static int
  i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  {
-	if (obj->last_fence.request) {
-		int ret = i915_wait_request(obj->last_fence.request);
-		if (ret)
-			return ret;
+	int ret;

-		i915_gem_request_assign(&obj->last_fence.request, NULL);
-	}
+	ret = i915_gem_active_wait(&obj->last_fence);
+	if (ret)
+		return ret;

+	i915_gem_active_set(&obj->last_fence, NULL);
  	return 0;
  }

diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index ff8c54fa955f..46d9b00a28d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -275,14 +275,138 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
   * resource including itself.
   */
  struct i915_gem_active {
-	struct drm_i915_gem_request *request;
+	struct drm_i915_gem_request *__request;
  };

+/**
+ * i915_gem_active_set - updates the tracker to watch the current request
+ * @active - the active tracker
+ * @request - the request to watch
+ *
+ * i915_gem_active_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
  static inline void
  i915_gem_active_set(struct i915_gem_active *active,
  		    struct drm_i915_gem_request *request)
  {
-	i915_gem_request_assign(&active->request, request);
+	i915_gem_request_assign(&active->__request, request);
+}
+
+/**
+ * i915_gem_active_peek - report the request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the
+ * caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek(const struct i915_gem_active *active)
+{
+	return active->__request;
+}
+
+/**
+ * i915_gem_active_get - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request || i915_gem_request_completed(request))

The check for request_completed feels like a hack - why is it needed?

+		return NULL;
+
+	return i915_gem_request_get(request);
+}
+
+/**
+ * __i915_gem_active_is_busy - report whether the active tracker is assigned
+ * @active - the active tracker
+ *
+ * __i915_gem_active_is_busy() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+__i915_gem_active_is_busy(const struct i915_gem_active *active)
+{
+	return i915_gem_active_peek(active);
+}
+
+/**
+ * i915_gem_active_is_idle - report whether the active tracker is idle
+ * @active - the active tracker
+ *
+ * i915_gem_active_is_idle() returns true if the active tracker is currently
+ * unassigned or if the request is complete (but not yet retired). Requires
+ * the caller to hold struct_mutex (but that can be relaxed if desired).
+ */
+static inline bool
+i915_gem_active_is_idle(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request || i915_gem_request_completed(request))
+		return true;
+
+	return false;
+}
+
+/**
+ * i915_gem_active_wait - waits until the request is completed
+ * @active - the active request on which to wait
+ *
+ * i915_gem_active_wait() waits until the request is completed before
+ * returning.
+ */
+static inline int __must_check
+i915_gem_active_wait(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request)
+		return 0;
+
+	return i915_wait_request(request);
+}
+
+/**
+ * i915_gem_active_retire - waits until the request is retired
+ * @active - the active request on which to wait
+ *
+ * Unlike i915_gem_active_eait(), this i915_gem_active_retire() will

s/eait/wait/

+ * make sure the request is retired before returning.
+ */
+static inline int __must_check
+i915_gem_active_retire(const struct i915_gem_active *active)
+{
+	return i915_gem_active_wait(active);
+}

But how does it ensure anything different from i915_gem_active_wait() when it just calls that very function? Maybe a difference will be introduced in future patches?

+
+/* Convenience functions for peeking at state inside active's request whilst
+ * guarded by the struct_mutex.
+ */
+
+static inline uint32_t
+i915_gem_active_get_seqno(const struct i915_gem_active *active)
+{
+	return i915_gem_request_get_seqno(i915_gem_active_peek(active));
+}
+
+static inline struct intel_engine_cs *
+i915_gem_active_get_engine(const struct i915_gem_active *active)
+{
+	return i915_gem_request_get_engine(i915_gem_active_peek(active));
  }

  #define for_each_active(mask, idx) \
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 00d796da65fb..8cef2d6b291a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -242,7 +242,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
  			}

  			obj->fence_dirty =
-				obj->last_fence.request ||
+				!i915_gem_active_is_idle(&obj->last_fence) ||
  				obj->fence_reg != I915_FENCE_REG_NONE;

  			obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32f50a70ea42..00ab5e9d2eb7 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -74,11 +74,9 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
  	for (i = 0; i < I915_NUM_ENGINES; i++) {
  		struct drm_i915_gem_request *req;

-		req = obj->last_read[i].request;
-		if (req == NULL)
-			continue;
-
-		requests[n++] = i915_gem_request_get(req);
+		req = i915_gem_active_get(&obj->last_read[i]);
+		if (req)
+			requests[n++] = req;
  	}

  	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5e12b8ee49d2..9e1949f2f4dd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -744,13 +744,14 @@ static void capture_bo(struct drm_i915_error_buffer *err,
  		       struct i915_vma *vma)
  {
  	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *engine;
  	int i;

  	err->size = obj->base.size;
  	err->name = obj->base.name;
  	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read[i].request);
-	err->wseqno = i915_gem_request_get_seqno(obj->last_write.request);
+		err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
+	err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
  	err->gtt_offset = vma->node.start;
  	err->read_domains = obj->base.read_domains;
  	err->write_domain = obj->base.write_domain;
@@ -762,8 +763,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
  	err->dirty = obj->dirty;
  	err->purgeable = obj->madv != I915_MADV_WILLNEED;
  	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->last_write.request ? obj->last_write.request->engine->id : -1;
  	err->cache_level = obj->cache_level;
+
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	err->ring = engine ? engine->id : -1;
  }

  static u32 capture_active_bo(struct drm_i915_error_buffer *err,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 27ac6db4e26a..a0abe5588d17 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11465,7 +11465,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
  	if (resv && !reservation_object_test_signaled_rcu(resv, false))
  		return true;

-	return engine != i915_gem_request_get_engine(obj->last_write.request);
+	return engine != i915_gem_active_get_engine(&obj->last_write);
  }

  static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11768,7 +11768,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
  	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
  		engine = &dev_priv->engine[BCS];
  	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_request_get_engine(obj->last_write.request);
+		engine = i915_gem_active_get_engine(&obj->last_write);
  		if (engine == NULL || engine->id != RCS)
  			engine = &dev_priv->engine[BCS];
  	} else {
@@ -11789,9 +11789,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
  	if (mmio_flip) {
  		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);

-		i915_gem_request_assign(&work->flip_queued_req,
-					obj->last_write.request);
-
+		work->flip_queued_req = i915_gem_active_get(&obj->last_write);
  		schedule_work(&work->mmio_work);
  	} else {
  		request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14092,11 +14090,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
  	}

  	if (ret == 0) {
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(new_state);
-
-		i915_gem_request_assign(&plane_state->wait_req,
-					obj->last_write.request);
+		to_intel_plane_state(new_state)->wait_req =
+			i915_gem_active_get(&obj->last_write);
  	}

  	return ret;


Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]
  Powered by Linux