Now that we have a header file for internal scheduler interfaces, we can
move some more prototypes into it. This removes the chance of drivers
using interfaces that were never intended for them.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Danilo Krummrich <dakr@xxxxxxxxxx>
Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
Cc: Philipp Stanner <phasta@xxxxxxxxxx>
---
 drivers/gpu/drm/scheduler/sched_fence.c    |  2 ++
 drivers/gpu/drm/scheduler/sched_internal.h | 30 ++++++++++++++++++++++
 include/drm/gpu_scheduler.h                | 27 -------------------
 3 files changed, 32 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 0f35f009b9d3..e971528504a5 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -29,6 +29,8 @@
 
 #include <drm/gpu_scheduler.h>
 
+#include "sched_internal.h"
+
 static struct kmem_cache *sched_fence_slab;
 
 static int __init drm_sched_fence_slab_init(void)
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 9ff5cddb5708..a521faef9a3c 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -1,6 +1,36 @@
 #ifndef _DRM_GPU_SCHEDULER_INTERNAL_H_
 #define _DRM_GPU_SCHEDULER_INTERNAL_H_
 
+
+/* Used to choose between FIFO and RR job-scheduling */
+extern int drm_sched_policy;
+
+#define DRM_SCHED_POLICY_RR 0
+#define DRM_SCHED_POLICY_FIFO 1
+
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+			     struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+				struct drm_sched_entity *entity);
+
+void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+				     struct drm_sched_rq *rq, ktime_t ts);
+
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
+
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
+					      void *owner);
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+			  struct drm_sched_entity *entity);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
+			       struct dma_fence *parent);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
+
 /**
  * drm_sched_entity_queue_pop - Low level helper for popping queued jobs
  *
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 962825613596..f45a9333ed04 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -71,12 +71,6 @@ enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_COUNT
 };
 
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR 0
-#define DRM_SCHED_POLICY_FIFO 1
-
 /**
  * struct drm_sched_entity - A wrapper around a job queue (typically
  * attached to the DRM file_priv).
@@ -575,7 +569,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
 
 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
 void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
@@ -585,14 +578,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
 void drm_sched_increase_karma(struct drm_sched_job *bad);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
 
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
-				struct drm_sched_entity *entity);
-
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq, ktime_t ts);
-
 int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  enum drm_sched_priority priority,
 			  struct drm_gpu_scheduler **sched_list,
@@ -601,23 +586,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_fini(struct drm_sched_entity *entity);
 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority);
 int drm_sched_entity_error(struct drm_sched_entity *entity);
 
-struct drm_sched_fence *drm_sched_fence_alloc(
-	struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_init(struct drm_sched_fence *fence,
-			  struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct drm_sched_fence *fence);
-
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
-			       struct dma_fence *parent);
-void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
-
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 			      unsigned long remaining);
-- 
2.48.0
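
P.S. (not part of the patch): drivers should keep building unchanged, since the
submission entry points stay in the public header. A minimal sketch of a
hypothetical driver-side helper (the function name is made up for illustration)
that relies only on interfaces remaining in <drm/gpu_scheduler.h>:

#include <drm/gpu_scheduler.h>

/*
 * Hypothetical driver helper, for illustration only. It uses only
 * interfaces that remain public after this patch: drm_sched_job_arm()
 * and drm_sched_entity_push_job(). A driver that instead called a now
 * scheduler-private helper such as drm_sched_wakeup() or
 * drm_sched_fence_alloc() would fail to build with an implicit
 * declaration error, which is the point of the move.
 */
static void example_driver_submit(struct drm_sched_job *job)
{
	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
}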