Re: [PATCH 1/6] drm/sched: Add internal job peek/pop API

On Wed, 2025-02-12 at 12:30 +0000, Tvrtko Ursulin wrote:
> 
> On 12/02/2025 10:40, Philipp Stanner wrote:
> > On Wed, 2025-02-12 at 09:32 +0000, Tvrtko Ursulin wrote:
> > > 
> > > On 12/02/2025 09:02, Philipp Stanner wrote:
> > > > On Fri, 2025-02-07 at 14:50 +0000, Tvrtko Ursulin wrote:
> > > > > The idea is to add helpers for peeking and popping jobs from entities,
> > > > > with the goal of decoupling the hidden assumption in the code that
> > > > > queue_node is the first element in struct drm_sched_job.
> > > > > 
> > > > > That assumption usually comes in the form of:
> > > > > 
> > > > >     while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue))))
> > > > > 
> > > > > Which breaks if the queue_node is re-positioned due to
> > > > > to_drm_sched_job being implemented with a container_of.
> > > > > 
> > > > > This also allows us to remove duplicate definitions of
> > > > > to_drm_sched_job.
> > > > > 
> > > > > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxx>
> > > > > Cc: Christian König <christian.koenig@xxxxxxx>
> > > > > Cc: Danilo Krummrich <dakr@xxxxxxxxxx>
> > > > > Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
> > > > > Cc: Philipp Stanner <phasta@xxxxxxxxxx>
> > > > > ---
> > > > >    drivers/gpu/drm/scheduler/sched_entity.c   | 11 +++---
> > > > >    drivers/gpu/drm/scheduler/sched_internal.h | 46 ++++++++++++++++++++++
> > > > >    drivers/gpu/drm/scheduler/sched_main.c     |  7 ++--
> > > > >    3 files changed, 54 insertions(+), 10 deletions(-)
> > > > >    create mode 100644 drivers/gpu/drm/scheduler/sched_internal.h
> > > > > 
> > > > > diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> > > > > index 69bcf0e99d57..a171f05ad761 100644
> > > > > --- a/drivers/gpu/drm/scheduler/sched_entity.c
> > > > > +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> > > > > @@ -28,11 +28,10 @@
> > > > >    #include <drm/drm_print.h>
> > > > >    #include <drm/gpu_scheduler.h>
> > > > >    
> > > > > +#include "sched_internal.h"
> > > > > +
> > > > >    #include "gpu_scheduler_trace.h"
> > > > >    
> > > > > -#define to_drm_sched_job(sched_job)		\
> > > > > -		container_of((sched_job), struct drm_sched_job, queue_node)
> > > > > -
> > > > >    /**
> > > > >     * drm_sched_entity_init - Init a context entity used by scheduler when
> > > > >     * submit to HW ring.
> > > > > @@ -255,7 +254,7 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
> > > > >    	/* The entity is guaranteed to not be used by the scheduler */
> > > > >    	prev = rcu_dereference_check(entity->last_scheduled, true);
> > > > >    	dma_fence_get(prev);
> > > > > -	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
> > > > > +	while ((job = drm_sched_entity_queue_pop(entity))) {
> > > > >    		struct drm_sched_fence *s_fence = job->s_fence;
> > > > >    
> > > > >    		dma_fence_get(&s_fence->finished);
> > > > > @@ -477,7 +476,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
> > > > >    {
> > > > >    	struct drm_sched_job *sched_job;
> > > > >    
> > > > > -	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> > > > > +	sched_job = drm_sched_entity_queue_peek(entity);
> > > > >    	if (!sched_job)
> > > > >    		return NULL;
> > > > >    
> > > > > @@ -513,7 +512,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
> > > > >    	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
> > > > >    		struct drm_sched_job *next;
> > > > >    
> > > > > -		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> > > > > +		next = drm_sched_entity_queue_peek(entity);
> > > > >    		if (next) {
> > > > >    			struct drm_sched_rq *rq;
> > > > >    
> > > > > diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
> > > > > new file mode 100644
> > > > > index 000000000000..25ac62ac2bf3
> > > > > --- /dev/null
> > > > > +++ b/drivers/gpu/drm/scheduler/sched_internal.h
> > > > > @@ -0,0 +1,46 @@
> > > > > +#ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_
> > > > > +#define _DRM_GPU_SCHEDULER_INTERNAL_H_
> > > > > +
> > > > > +/**
> > > > > + * drm_sched_entity_queue_pop - Low level helper for popping queued jobs
> > > > > + *
> > > > > + * @entity: scheduler entity
> > > > > + *
> > > > > + * Low level helper for popping queued jobs.
> > > > > + *
> > > > > + * Returns the job dequeued or NULL.
> > > > > + */
> > > > > +static inline struct drm_sched_job *
> > > > > +drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
> > > > > +{
> > > > > +	struct spsc_node *node;
> > > > > +
> > > > > +	node = spsc_queue_pop(&entity->job_queue);
> > > > > +	if (!node)
> > > > > +		return NULL;
> > > > > +
> > > > > +	return container_of(node, struct drm_sched_job, queue_node);
> > > > > +}
> > > > > +
> > > > > +/**
> > > > > + * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
> > > > > + *
> > > > > + * @entity: scheduler entity
> > > > > + *
> > > > > + * Low level helper for peeking at the job queue
> > > > > + *
> > > > > + * Returns the job at the head of the queue or NULL.
> > > > 
> > > > I would like to (slowly) work towards a unified style regarding the
> > > > docstrings. They're currently relatively inconsistent in drm/sched.
> > > > 
> > > > I think we should do it that way:
> > > > 
> > > > ""
> > > > @entity: scheduler entity
> > > > 
> > > > Returns: the job at the head of the queue or NULL.
> > > > 
> > > > Low level helper for peeking at the job queue.
> > > > ""
> > > 
> > > Returns before the description would be yet another new style, no?
> > > I'd say that if we are churning, let's follow
> > > Documentation/doc-guide/kernel-doc.rst.
> > 
> > Oh yes, you are right – the official guideline demands "Return:" at the
> > end. So let's go with that for contributions.
> 
> So you want me to respin, or are you okay with doing all the scheduler
> kernel doc in one patch afterwards?

Both are OK, I guess. It's not a big deal; it's just one letter being
replaced. If you find some other nits you'd like to address, you could
send a v5 with that change.
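
For reference, a rough sketch of what the kernel-doc for
drm_sched_entity_queue_peek() could look like with the "Return:" section
placed last, per Documentation/doc-guide/kernel-doc.rst (just an
illustration, not the final wording):

/**
 * drm_sched_entity_queue_peek - Low level helper for peeking at the job queue
 * @entity: scheduler entity
 *
 * Low level helper for peeking at the job queue.
 *
 * Return: the job at the head of the queue or NULL.
 */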

The rest of the series looks good to me. Having a pro forma ACK from AMD
for merging everything together would be nice, though.

P.


> 
> Regards,
> 
> Tvrtko
> 
> > >   Or even consider sending a patch
> > > which churns everything at once.
> > > 
> > > Regards,
> > > 
> > > Tvrtko
> > > 
> > > > > + */
> > > > > +static inline struct drm_sched_job *
> > > > > +drm_sched_entity_queue_peek(struct drm_sched_entity *entity)
> > > > > +{
> > > > > +	struct spsc_node *node;
> > > > > +
> > > > > +	node = spsc_queue_peek(&entity->job_queue);
> > > > > +	if (!node)
> > > > > +		return NULL;
> > > > > +
> > > > > +	return container_of(node, struct drm_sched_job, queue_node);
> > > > > +}
> > > > > +
> > > > > +#endif
> > > > > diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> > > > > index a48be16ab84f..9f614a775c49 100644
> > > > > --- a/drivers/gpu/drm/scheduler/sched_main.c
> > > > > +++ b/drivers/gpu/drm/scheduler/sched_main.c
> > > > > @@ -78,6 +78,8 @@
> > > > >    #include <drm/gpu_scheduler.h>
> > > > >    #include <drm/spsc_queue.h>
> > > > >    
> > > > > +#include "sched_internal.h"
> > > > > +
> > > > >    #define CREATE_TRACE_POINTS
> > > > >    #include "gpu_scheduler_trace.h"
> > > > >    
> > > > > @@ -87,9 +89,6 @@ static struct lockdep_map drm_sched_lockdep_map = {
> > > > >    };
> > > > >    #endif
> > > > >    
> > > > > -#define to_drm_sched_job(sched_job)		\
> > > > > -		container_of((sched_job), struct drm_sched_job, queue_node)
> > > > > -
> > > > >    int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
> > > > >    
> > > > >    /**
> > > > > @@ -123,7 +122,7 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
> > > > >    {
> > > > >    	struct drm_sched_job *s_job;
> > > > >    
> > > > > -	s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
> > > > > +	s_job = drm_sched_entity_queue_peek(entity);
> > > > >    	if (!s_job)
> > > > >    		return false;
> > > > >    
> > > > 
> > > 
> > 
> 




