[PATCH v2] drm/i915/gem: Free pages before rcu-freeing the object

As we have dropped the final reference to the object, we do not need to
wait until after the rcu grace period to drop its pages. We still
require struct_mutex to completely unbind the object and release its
pages, so we still need a free-worker to manage that from process
context. Scheduling the release of pages before waiting out the rcu
grace period means that we no longer trap those pages beyond the reach
of the shrinker.

v2: Pass the request to skip busy vmas down to the underlying unbind
routine, so that we avoid inspecting the reservation underneath
i915->mm.obj_lock, which may be taken from inside irq context.
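
For reference, the skip reduces to a single check in the unbind loop
(see the i915_gem.c hunk below), with the shrinker mapping the absence
of I915_SHRINK_ACTIVE onto the new flag:

	if (flags & I915_GEM_OBJECT_UNBIND_ONLY_IF_IDLE &&
	    i915_vma_is_active(vma))
		ret = -EBUSY;
	else
		ret = i915_vma_unbind(vma);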

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111035
Fixes: a93615f900bd ("drm/i915: Throw away the active object retirement complexity")
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c   | 82 +++++++++-----------
 drivers/gpu/drm/i915/gem/i915_gem_phys.c     |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 18 +++--
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c  |  2 +-
 drivers/gpu/drm/i915/i915_drv.h              | 15 ++--
 drivers/gpu/drm/i915/i915_gem.c              |  9 ++-
 6 files changed, 67 insertions(+), 61 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 43194fbcbc2e..d3e96f09c6b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -146,6 +146,18 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	}
 }
 
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+	struct drm_i915_gem_object *obj =
+		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+	i915_gem_object_free(obj);
+
+	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+	atomic_dec(&i915->mm.free_count);
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
 				    struct llist_node *freed)
 {
@@ -168,22 +180,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
 
-		/*
-		 * This serializes freeing with the shrinker. Since the free
-		 * is delayed, first by RCU then by the workqueue, we want the
-		 * shrinker to be able to free pages of unreferenced objects,
-		 * or else we may oom whilst there are plenty of deferred
-		 * freed objects.
-		 */
-		if (i915_gem_object_has_pages(obj) &&
-		    i915_gem_object_is_shrinkable(obj)) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&i915->mm.obj_lock, flags);
-			list_del_init(&obj->mm.link);
-			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-		}
-
 		mutex_unlock(&i915->drm.struct_mutex);
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
@@ -197,19 +193,15 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		atomic_set(&obj->mm.pages_pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
+		bitmap_free(obj->bit_17);
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
 
 		drm_gem_object_release(&obj->base);
 
-		bitmap_free(obj->bit_17);
-		i915_gem_object_free(obj);
-
-		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
-		atomic_dec(&i915->mm.free_count);
-
-		cond_resched();
+		/* But keep the pointer alive for RCU-protected lookups */
+		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 	}
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
@@ -260,18 +252,34 @@ static void __i915_gem_free_work(struct work_struct *work)
 	spin_unlock(&i915->mm.free_lock);
 }
 
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
-	struct drm_i915_gem_object *obj =
-		container_of(head, typeof(*obj), rcu);
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
 	/*
-	 * We reuse obj->rcu for the freed list, so we had better not treat
-	 * it like a rcu_head from this point forwards. And we expect all
-	 * objects to be freed via this path.
+	 * Before we free the object, make sure any pure RCU-only
+	 * read-side critical sections are complete, e.g.
+	 * i915_gem_busy_ioctl(). For the corresponding synchronized
+	 * lookup see i915_gem_object_lookup_rcu().
 	 */
-	destroy_rcu_head(&obj->rcu);
+	atomic_inc(&i915->mm.free_count);
+
+	/*
+	 * This serializes freeing with the shrinker. Since the free
+	 * is delayed, first by RCU then by the workqueue, we want the
+	 * shrinker to be able to free pages of unreferenced objects,
+	 * or else we may oom whilst there are plenty of deferred
+	 * freed objects.
+	 */
+	if (i915_gem_object_has_pages(obj) &&
+	    i915_gem_object_is_shrinkable(obj)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&i915->mm.obj_lock, flags);
+		list_del_init(&obj->mm.link);
+		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+	}
 
 	/*
 	 * Since we require blocking on struct_mutex to unbind the freed
@@ -287,20 +295,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
-	/*
-	 * Before we free the object, make sure any pure RCU-only
-	 * read-side critical sections are complete, e.g.
-	 * i915_gem_busy_ioctl(). For the corresponding synchronized
-	 * lookup see i915_gem_object_lookup_rcu().
-	 */
-	atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
-	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
 static inline enum fb_op_origin
 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 7b900ee4ed8d..e3183e4c23ee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -159,7 +159,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (obj->ops != &i915_gem_shmem_ops)
 		return -EINVAL;
 
-	err = i915_gem_object_unbind(obj);
+	err = i915_gem_object_unbind(obj, 0);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index d99f1a600b96..56a7c6a7f7f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
+			      unsigned long shrink)
 {
-	if (i915_gem_object_unbind(obj) == 0)
+	unsigned long flags;
+
+	flags = 0;
+	if (!(shrink & I915_SHRINK_ACTIVE))
+		flags = I915_GEM_OBJECT_UNBIND_ONLY_IF_IDLE;
+
+	if (i915_gem_object_unbind(obj, flags) == 0)
 		__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+
 	return !i915_gem_object_has_pages(obj);
 }
 
@@ -229,9 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 				continue;
 
 			if (!(shrink & I915_SHRINK_ACTIVE) &&
-			    (i915_gem_object_is_framebuffer(obj) ||
-			     !reservation_object_test_signaled_rcu(obj->base.resv,
-								   true)))
+			    i915_gem_object_is_framebuffer(obj))
 				continue;
 
 			if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-			if (unsafe_drop_pages(obj)) {
+			if (unsafe_drop_pages(obj, shrink)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..b232945ebdb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -150,7 +150,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 			}
 		}
 
-		ret = i915_gem_object_unbind(obj);
+		ret = i915_gem_object_unbind(obj, 0);
 		if (ret == 0)
 			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
 		i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 02dd9f9f3a89..240075271440 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2446,18 +2446,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
 
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
-	if (!atomic_read(&i915->mm.free_count))
-		return;
-
-	/* A single pass should suffice to release all the freed objects (along
+	/*
+	 * A single pass should suffice to release all the freed objects (along
 	 * most call paths) , but be a little more paranoid in that freeing
 	 * the objects does take a little amount of time, during which the rcu
 	 * callbacks could have added new objects into the freed list, and
 	 * armed the work again.
 	 */
-	do {
+	while (atomic_read(&i915->mm.free_count)) {
+		flush_work(&i915->mm.free_work);
 		rcu_barrier();
-	} while (flush_work(&i915->mm.free_work));
+	}
 }
 
 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
@@ -2488,7 +2487,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags);
 
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+			   unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ONLY_IF_IDLE BIT(0)
 
 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b7f290b77f8f..fb36e1d23c4b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -101,7 +101,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+			   unsigned long flags)
 {
 	struct i915_vma *vma;
 	LIST_HEAD(still_in_list);
@@ -116,7 +117,11 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		list_move_tail(&vma->obj_link, &still_in_list);
 		spin_unlock(&obj->vma.lock);
 
-		ret = i915_vma_unbind(vma);
+		if (flags & I915_GEM_OBJECT_UNBIND_ONLY_IF_IDLE &&
+		    i915_vma_is_active(vma))
+			ret = -EBUSY;
+		else
+			ret = i915_vma_unbind(vma);
 
 		spin_lock(&obj->vma.lock);
 	}
-- 
2.20.1
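
A note on the i915_gem_drain_freed_objects() change: as the object
struct now outlives its pages (the worker releases the pages before the
kfree is deferred behind call_rcu()), a drain must flush the free
worker first and only then wait out the rcu grace period, repeating
while mm.free_count remains non-zero, rather than looping on
rcu_barrier() ahead of the flush.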
