Re: [PATCH 7/8] drm/sched: Re-group and rename the entity run-queue lock

On 09.09.24 at 19:19, Tvrtko Ursulin wrote:
From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxx>

Christian suggested renaming the lock and improving the documentation of
what it protects, and also re-ordering the structure members so that all
members protected by the lock are grouped together in one block.
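
Not part of the patch itself, but to make the convention concrete: after
this change, everything grouped under entity->lock (@rq, @priority,
@sched_list, @num_sched_list) is only touched while holding that lock. A
minimal sketch, mirroring what drm_sched_entity_select_rq() does in the
diff below; the example_* function name is made up for illustration:

#include <drm/gpu_scheduler.h>
#include <linux/spinlock.h>

/* Illustration only: all members grouped under entity->lock are
 * updated with the lock held, as drm_sched_entity_select_rq() does.
 */
static void example_move_entity(struct drm_sched_entity *entity,
				struct drm_sched_rq *new_rq,
				enum drm_sched_priority prio)
{
	spin_lock(&entity->lock);
	entity->priority = prio;
	if (new_rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = new_rq;
	}
	spin_unlock(&entity->lock);
}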

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Alex Deucher <alexander.deucher@xxxxxxx>
Cc: Luben Tuikov <ltuikov89@xxxxxxxxx>
Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
Cc: Philipp Stanner <pstanner@xxxxxxxxxx>

Reviewed-by: Christian König <christian.koenig@xxxxxxx>

---
  drivers/gpu/drm/scheduler/sched_entity.c | 24 ++++++++++++------------
  drivers/gpu/drm/scheduler/sched_main.c   |  6 +++---
  include/drm/gpu_scheduler.h              | 15 ++++++++-------
  3 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 2da677681291..b4c4f9923e0b 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -105,7 +105,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
  	/* We start in an idle state. */
  	complete_all(&entity->entity_idle);
-	spin_lock_init(&entity->rq_lock);
+	spin_lock_init(&entity->lock);
  	spsc_queue_init(&entity->job_queue);
atomic_set(&entity->fence_seq, 0);
@@ -133,10 +133,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
  {
  	WARN_ON(!num_sched_list || !sched_list);
-	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->lock);
  	entity->sched_list = sched_list;
  	entity->num_sched_list = num_sched_list;
-	spin_unlock(&entity->rq_lock);
+	spin_unlock(&entity->lock);
  }
  EXPORT_SYMBOL(drm_sched_entity_modify_sched);
@@ -244,10 +244,10 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
  	if (!entity->rq)
  		return;
-	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->lock);
  	entity->stopped = true;
  	drm_sched_rq_remove_entity(entity->rq, entity);
-	spin_unlock(&entity->rq_lock);
+	spin_unlock(&entity->lock);
/* Make sure this entity is not used by the scheduler at the moment */
  	wait_for_completion(&entity->entity_idle);
@@ -396,9 +396,9 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
  void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
  				   enum drm_sched_priority priority)
  {
-	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->lock);
  	entity->priority = priority;
-	spin_unlock(&entity->rq_lock);
+	spin_unlock(&entity->lock);
  }
  EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -555,14 +555,14 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
  	if (fence && !dma_fence_is_signaled(fence))
  		return;
-	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->lock);
  	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
  	rq = sched ? sched->sched_rq[entity->priority] : NULL;
  	if (rq != entity->rq) {
  		drm_sched_rq_remove_entity(entity->rq, entity);
  		entity->rq = rq;
  	}
-	spin_unlock(&entity->rq_lock);
+	spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
  		entity->sched_list = NULL;
@@ -602,9 +602,9 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
  		struct drm_sched_rq *rq;
/* Add the entity to the run queue */
-		spin_lock(&entity->rq_lock);
+		spin_lock(&entity->lock);
  		if (entity->stopped) {
-			spin_unlock(&entity->rq_lock);
+			spin_unlock(&entity->lock);
DRM_ERROR("Trying to push to a killed entity\n");
  			return;
@@ -619,7 +619,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
  		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
  			drm_sched_rq_update_fifo_locked(entity, submit_ts);
-		spin_unlock(&entity->rq_lock);
+		spin_unlock(&entity->lock);
drm_sched_wakeup(sched, entity);
  	}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 54c5fe7a7d1d..937e7d1cfc49 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -165,7 +165,7 @@ static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *enti
void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, ktime_t ts)
  {
-	lockdep_assert_held(&entity->rq_lock);
+	lockdep_assert_held(&entity->lock);
  	spin_lock(&entity->rq->lock);
@@ -186,9 +186,9 @@ void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
  	 * for entity from within concurrent drm_sched_entity_select_rq and the
  	 * other to update the rb tree structure.
  	 */
-	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->lock);
  	drm_sched_rq_update_fifo_locked(entity, ts);
-	spin_unlock(&entity->rq_lock);
+	spin_unlock(&entity->lock);
  }
/**
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d4a3ba333568..5a1e4c803b90 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -96,6 +96,14 @@ struct drm_sched_entity {
  	 */
  	struct list_head		list;
+	/**
+	 * @lock:
+	 *
+	 * Lock protecting the run-queue (@rq) to which this entity belongs,
+	 * @priority and the list of schedulers (@sched_list, @num_sched_list).
+	 */
+	spinlock_t			lock;
+
  	/**
  	 * @rq:
  	 *
@@ -140,13 +148,6 @@ struct drm_sched_entity {
  	 */
  	enum drm_sched_priority         priority;
-	/**
-	 * @rq_lock:
-	 *
-	 * Lock to modify the runqueue to which this entity belongs.
-	 */
-	spinlock_t			rq_lock;
-
  	/**
  	 * @job_queue: the list of jobs of this entity.
  	 */
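
A side note on the new @lock kerneldoc above: it pairs with the
lockdep_assert_held(&entity->lock) in drm_sched_rq_update_fifo_locked().
Below is a minimal sketch of a _locked-style helper written against that
convention; the helper name is hypothetical, not something the patch adds:

#include <drm/gpu_scheduler.h>
#include <linux/lockdep.h>

/* Hypothetical helper: the caller must already hold entity->lock,
 * which lockdep verifies on debug kernels.
 */
static void example_set_priority_locked(struct drm_sched_entity *entity,
					enum drm_sched_priority prio)
{
	lockdep_assert_held(&entity->lock);
	entity->priority = prio;
}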



