Re: [PATCH 3/4] drm/sched: Add internal job peek/pop API

On Wed, 2025-02-05 at 15:33 +0000, Tvrtko Ursulin wrote:
> Idea is to add helpers for peeking and poppling jobs from entities

s/poppling/popping

> with the goal of decoupling the hidden assumption in the code that
> queue_node is the first element in struct drm_sched_job.
> 
> That assumption usually comes in the form of:
> 
>   while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue))))
> 
> Which breaks if the queue_node is re-positioned due to to_drm_sched_job
> being implemented with a container_of.
> 
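To spell the pitfall out for anyone skimming the archive: container_of()
subtracts offsetof(type, member) from the pointer it is given, so the
NULL that spsc_queue_pop() returns for an empty queue only maps back to
NULL while that offset happens to be zero, i.e. while queue_node stays
the first member. A minimal userspace sketch of the failure mode (purely
illustrative, names are made up, and the pointer arithmetic on NULL is
strictly undefined behaviour, but it shows what goes wrong in practice):

  #include <stddef.h>
  #include <stdio.h>

  struct spsc_node { struct spsc_node *next; };

  struct job {
  	int prio;			/* queue_node is no longer first */
  	struct spsc_node queue_node;
  };

  /* Old-style accessor: container_of() with no NULL check. */
  #define to_job(node) \
  	((struct job *)((char *)(node) - offsetof(struct job, queue_node)))

  int main(void)
  {
  	struct spsc_node *popped = NULL;	/* empty queue */

  	/*
  	 * A non-zero offset turns NULL into a bogus non-NULL pointer,
  	 * so a loop that stops when the job is NULL never stops and
  	 * then dereferences garbage.
  	 */
  	printf("to_job(NULL) = %p\n", (void *)to_job(popped));
  	return 0;
  }

The new helpers sidestep this by testing the spsc_node pointer for NULL
before applying container_of().
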
> This also allows us to remove duplicate definition of to_drm_sched_job
> which the scheduler apparently tried to keep internal, but failed since
> in one of the previous patches we also removed a copy from amdgpu.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxx>

This one LGTM, +1
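
And it sets up the payoff: once every call site goes through these
helpers, queue_node no longer has to be the first member of struct
drm_sched_job, so a later patch can place it wherever it likes. Purely
illustrative (field list abbreviated, not a change this patch makes):

  struct drm_sched_job {
  	u64				id;
  	struct spsc_node		queue_node;	/* any position works now */
  	struct drm_gpu_scheduler	*sched;
  	struct drm_sched_fence		*s_fence;
  	/* ... */
  };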

> Cc: Christian König <christian.koenig@xxxxxxx>
> Cc: Danilo Krummrich <dakr@xxxxxxxxxx>
> Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
> Cc: Philipp Stanner <phasta@xxxxxxxxxx>
> ---
>  drivers/gpu/drm/scheduler/sched_entity.c   | 11 +++---
>  drivers/gpu/drm/scheduler/sched_internal.h | 43 ++++++++++++++++++++++
>  drivers/gpu/drm/scheduler/sched_main.c     |  9 ++---
>  3 files changed, 52 insertions(+), 11 deletions(-)
>  create mode 100644 drivers/gpu/drm/scheduler/sched_internal.h
> 
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index 69bcf0e99d57..737feff147a5 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -28,11 +28,10 @@
>  #include <drm/drm_print.h>
>  #include <drm/gpu_scheduler.h>
>  
> +#include "sched_internal.h"
> +
>  #include "gpu_scheduler_trace.h"
>  
> -#define to_drm_sched_job(sched_job)		\
> -		container_of((sched_job), struct drm_sched_job, queue_node)
> -
>  /**
>   * drm_sched_entity_init - Init a context entity used by scheduler when
>   * submit to HW ring.
> @@ -255,7 +254,7 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
>  	/* The entity is guaranteed to not be used by the scheduler */
>  	prev = rcu_dereference_check(entity->last_scheduled, true);
>  	dma_fence_get(prev);
> -	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
> +	while ((job = __drm_sched_entity_queue_pop(entity))) {
>  		struct drm_sched_fence *s_fence = job->s_fence;
>  
>  		dma_fence_get(&s_fence->finished);
> @@ -477,7 +476,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>  {
>  	struct drm_sched_job *sched_job;
>  
> -	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> +	sched_job = __drm_sched_entity_queue_peek(entity);
>  	if (!sched_job)
>  		return NULL;
>  
> @@ -513,7 +512,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
>  	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
>  		struct drm_sched_job *next;
>  
> -		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> +		next = __drm_sched_entity_queue_peek(entity);
>  		if (next) {
>  			struct drm_sched_rq *rq;
>  
> diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
> new file mode 100644
> index 000000000000..565c83e32371
> --- /dev/null
> +++ b/drivers/gpu/drm/scheduler/sched_internal.h
> @@ -0,0 +1,43 @@
> +
> +
> +/**
> + * __drm_sched_entity_queue_pop - Low level helper for popping queued jobs
> + *
> + * @entity: scheduler entity
> + *
> + * Low level helper for popping queued jobs.
> + *
> + * Returns the job dequeued or NULL.
> + */
> +static inline struct drm_sched_job *
> +__drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
> +{
> +	struct spsc_node *node;
> +
> +	node = spsc_queue_pop(&entity->job_queue);
> +	if (!node)
> +		return NULL;
> +
> +	return container_of(node, struct drm_sched_job, queue_node);
> +}
> +
> +/**
> + * __drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
> + *
> + * @entity: scheduler entity
> + *
> + * Low level helper for peeking at the job queue.
> + *
> + * Returns the job at the head of the queue or NULL.
> + */
> +static inline struct drm_sched_job *
> +__drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
> +{
> +	struct spsc_node *node;
> +
> +	node = spsc_queue_peek(&entity->job_queue);
> +	if (!node)
> +		return NULL;
> +
> +	return container_of(node, struct drm_sched_job, queue_node);
> +}
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 0363655db22d..41d6f839748e 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -78,6 +78,8 @@
>  #include <drm/gpu_scheduler.h>
>  #include <drm/spsc_queue.h>
>  
> +#include "sched_internal.h"
> +
>  #define CREATE_TRACE_POINTS
>  #include "gpu_scheduler_trace.h"
>  
> @@ -87,9 +89,6 @@ static struct lockdep_map drm_sched_lockdep_map = {
>  };
>  #endif
>  
> -#define to_drm_sched_job(sched_job)		\
> -		container_of((sched_job), struct drm_sched_job, queue_node)
> -
>  int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
>  
>  /**
> @@ -123,7 +122,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
>  {
>  	struct drm_sched_job *s_job;
>  
> -	s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> +	s_job = __drm_sched_entity_queue_peek(entity);
>  	if (!s_job)
>  		return false;
>  
> @@ -728,7 +727,7 @@ void drm_sched_cancel_all_jobs(struct drm_gpu_scheduler *sched, int errno)
>  
>  		spin_lock(&rq->lock);
>  		list_for_each_entry(entity, &rq->entities, list) {
> -			while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
> +			while ((job = __drm_sched_entity_queue_pop(entity))) {
>  				s_fence = job->s_fence;
>  				dma_fence_signal(&s_fence->scheduled);
>  				dma_fence_set_error(&s_fence->finished, errno);