[RFC PATCH 153/162] drm/i915: Implement eviction locking v2

From: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>

Use a separate list in the ww acquire context, and a separate locking
function, for objects that are locked only so they can be evicted.
Objects on this eviction list hold an extra reference while locked, and
their locks can be dropped early, before the rest of the ww transaction
completes.
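
Roughly, the intended calling pattern becomes the following. This is a
sketch only: the retry loop and the obj/victim variables are
illustrative, the helpers are the ones added or already present in the
driver:

  struct i915_gem_ww_ctx ww;
  int err;

  i915_gem_ww_ctx_init(&ww, true);
retry:
  err = i915_gem_object_lock(obj, &ww);
  if (!err) {
          err = i915_gem_object_lock_to_evict(victim, &ww);
          if (err == -EALREADY)
                  err = 0; /* victim already locked in this ww; skip it */
  }
  if (!err) {
          /* ... evict the victim to make room for obj ... */

          /*
           * Eviction locks, and the references taken with them, can
           * be dropped before the rest of the transaction completes.
           */
          i915_gem_ww_ctx_unlock_evictions(&ww);

          /* ... do the actual work on obj ... */
  }
  if (err == -EDEADLK) {
          err = i915_gem_ww_ctx_backoff(&ww);
          if (!err)
                  goto retry;
  }
  i915_gem_ww_ctx_fini(&ww);

Dropping the eviction locks early avoids holding every victim's
reservation lock for the whole transaction, while -EDEADLK on either
locking path is still resolved through the usual backoff/retry.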

Co-developed-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h    | 67 +++++++++++++++++--
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  5 ++
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  | 14 +++-
 drivers/gpu/drm/i915/i915_gem_ww.c            | 51 ++++++++++----
 drivers/gpu/drm/i915/i915_gem_ww.h            |  3 +
 5 files changed, 122 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 52a36b4052f0..e237b0fb0e79 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -158,6 +158,32 @@ static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
 		assert_object_held(obj);
 }
 
+static inline int
+i915_gem_object_lock_to_evict(struct drm_i915_gem_object *obj,
+			      struct i915_gem_ww_ctx *ww)
+{
+	int ret;
+
+	if (ww->intr)
+		ret = dma_resv_lock_interruptible(obj->base.resv, &ww->ctx);
+	else
+		ret = dma_resv_lock(obj->base.resv, &ww->ctx);
+
+	if (!ret) {
+		list_add_tail(&obj->obj_link, &ww->eviction_list);
+		i915_gem_object_get(obj);
+		obj->evict_locked = true;
+	}
+
+	GEM_WARN_ON(ret == -EALREADY);
+	if (ret == -EDEADLK) {
+		ww->contended_evict = true;
+		ww->contended = i915_gem_object_get(obj);
+	}
+
+	return ret;
+}
+
 static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
 					 struct i915_gem_ww_ctx *ww,
 					 bool intr)
@@ -169,13 +195,25 @@ static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
 	else
 		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
 
-	if (!ret && ww)
+	if (!ret && ww) {
 		list_add_tail(&obj->obj_link, &ww->obj_list);
-	if (ret == -EALREADY)
-		ret = 0;
+		obj->evict_locked = false;
+	}
 
-	if (ret == -EDEADLK)
+	if (ret == -EALREADY) {
+		ret = 0;
+		/* We've already evicted an object needed for this batch. */
+		if (obj->evict_locked) {
+			list_move_tail(&obj->obj_link, &ww->obj_list);
+			i915_gem_object_put(obj);
+			obj->evict_locked = false;
+		}
+	}
+
+	if (ret == -EDEADLK) {
+		ww->contended_evict = false;
 		ww->contended = i915_gem_object_get(obj);
+	}
 
 	return ret;
 }
@@ -580,6 +618,27 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
 		__i915_gem_object_invalidate_frontbuffer(obj, origin);
 }
 
+/**
+ * i915_gem_get_locking_ctx - Get the locking context of a locked object
+ * if any.
+ *
+ * @obj: The object to get the locking ctx from
+ *
+ * RETURN: The locking context if the object was locked using a context.
+ * NULL otherwise.
+ */
+static inline struct i915_gem_ww_ctx *
+i915_gem_get_locking_ctx(const struct drm_i915_gem_object *obj)
+{
+	struct ww_acquire_ctx *ctx;
+
+	ctx = obj->base.resv->lock.ctx;
+	if (!ctx)
+		return NULL;
+
+	return container_of(ctx, struct i915_gem_ww_ctx, ctx);
+}
+
 #ifdef CONFIG_MMU_NOTIFIER
 static inline bool
 i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 331d113f7d5b..c42c0d3d5d67 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -142,6 +142,11 @@ struct drm_i915_gem_object {
 	 */
 	struct list_head obj_link;
 
+	/**
+	 * @evict_locked: Whether @obj_link sits on the eviction_list
+	 */
+	bool evict_locked;
+
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
 	union {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 27674048f17d..59d0f14b90ea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -100,6 +100,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 		unsigned long *nr_scanned,
 		unsigned int shrink)
 {
+	struct drm_i915_gem_object *obj;
 	const struct {
 		struct list_head *list;
 		unsigned int bit;
@@ -164,7 +165,6 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 	 */
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
-		struct drm_i915_gem_object *obj;
 		unsigned long flags;
 
 		if ((shrink & phase->bit) == 0)
@@ -197,6 +197,10 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 			if (!can_release_pages(obj))
 				continue;
 
+			/* Already locked this object? */
+			if (ww && ww == i915_gem_get_locking_ctx(obj))
+				continue;
+
 			if (!kref_get_unless_zero(&obj->base.refcount))
 				continue;
 
@@ -209,7 +213,11 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 					if (!i915_gem_object_trylock(obj))
 						goto skip;
 				} else {
-					err = i915_gem_object_lock(obj, ww);
+					err = i915_gem_object_lock_to_evict(obj, ww);
+					if (err == -EALREADY) {
+						err = 0;
+						goto skip;
+					}
 					if (err)
 						goto skip;
 				}
@@ -235,6 +243,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 		if (err)
 			return err;
 	}
+	if (ww)
+		i915_gem_ww_ctx_unlock_evictions(ww);
 
 	if (shrink & I915_SHRINK_BOUND)
 		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.c b/drivers/gpu/drm/i915/i915_gem_ww.c
index 43960d8595eb..811bf7677d78 100644
--- a/drivers/gpu/drm/i915/i915_gem_ww.c
+++ b/drivers/gpu/drm/i915/i915_gem_ww.c
@@ -10,24 +10,45 @@ void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
 {
 	ww_acquire_init(&ww->ctx, &reservation_ww_class);
 	INIT_LIST_HEAD(&ww->obj_list);
+	INIT_LIST_HEAD(&ww->eviction_list);
 	ww->intr = intr;
 	ww->contended = NULL;
+	ww->contended_evict = false;
+}
+
+void i915_gem_ww_ctx_unlock_evictions(struct i915_gem_ww_ctx *ww)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	list_for_each_entry_safe(obj, next, &ww->eviction_list, obj_link) {
+		list_del(&obj->obj_link);
+		GEM_WARN_ON(!obj->evict_locked);
+		i915_gem_object_unlock(obj);
+		i915_gem_object_put(obj);
+	}
 }
 
 static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
 {
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj, *next;
 
-	while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
+	list_for_each_entry_safe(obj, next, &ww->obj_list, obj_link) {
 		list_del(&obj->obj_link);
+		GEM_WARN_ON(obj->evict_locked);
 		i915_gem_object_unlock(obj);
 	}
+
+	i915_gem_ww_ctx_unlock_evictions(ww);
 }
 
 void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
 {
+	bool evict_locked = obj->evict_locked;
+
 	list_del(&obj->obj_link);
 	i915_gem_object_unlock(obj);
+	if (evict_locked)
+		i915_gem_object_put(obj);
 }
 
 void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
@@ -39,27 +60,33 @@ void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
 
 int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
 {
+	struct drm_i915_gem_object *obj = ww->contended;
 	int ret = 0;
 
-	if (WARN_ON(!ww->contended))
+	if (WARN_ON(!obj))
 		return -EINVAL;
 
 	i915_gem_ww_ctx_unlock_all(ww);
 	if (ww->intr)
-		ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
+		ret = dma_resv_lock_slow_interruptible(obj->base.resv, &ww->ctx);
 	else
-		dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
+		dma_resv_lock_slow(obj->base.resv, &ww->ctx);
+	if (ret)
+		goto out;
 
 	/*
-	 * Unlocking the contended lock again, as might not need it in
-	 * the retried transaction. This does not increase starvation,
-	 * but it's opening up for a wakeup flood if there are many
-	 * transactions relaxing on this object.
+	 * Unlocking the contended lock again, if it was locked for eviction.
+	 * We will most likely not need it in the retried transaction.
 	 */
-	if (!ret)
-		dma_resv_unlock(ww->contended->base.resv);
+	if (ww->contended_evict) {
+		dma_resv_unlock(obj->base.resv);
+	} else {
+		obj->evict_locked = false;
+		list_add_tail(&obj->obj_link, &ww->obj_list);
+	}
 
-	i915_gem_object_put(ww->contended);
+out:
+	i915_gem_object_put(obj);
 	ww->contended = NULL;
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.h b/drivers/gpu/drm/i915/i915_gem_ww.h
index f6b1a796667b..11793b170cc2 100644
--- a/drivers/gpu/drm/i915/i915_gem_ww.h
+++ b/drivers/gpu/drm/i915/i915_gem_ww.h
@@ -10,15 +10,18 @@
 struct i915_gem_ww_ctx {
 	struct ww_acquire_ctx ctx;
 	struct list_head obj_list;
+	struct list_head eviction_list;
 	struct drm_i915_gem_object *contended;
 	unsigned short intr;
 	unsigned short loop;
+	unsigned short contended_evict;
 };
 
 void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
 void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
 int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
 void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
+void i915_gem_ww_ctx_unlock_evictions(struct i915_gem_ww_ctx *ww);
 
 /* Internal functions used by the inlines! Don't use. */
 static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err)
-- 
2.26.2
