On 31.07.2018 at 12:37, Nayan Deshmukh wrote:
The function selects the run queue from the entity's rq_list with the
least load. The load is determined by the number of jobs currently
queued on a scheduler.
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@xxxxxxxxx>
---
drivers/gpu/drm/scheduler/gpu_scheduler.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 375f6f7f6a93..c67f65ad8f15 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -255,6 +255,32 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
return true;
}
+/**
+ * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
+ *
+ * @entity: scheduler entity
+ *
+ * Return the pointer to the rq with least load.
+ */
+static struct drm_sched_rq *
+drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
+{
+ struct drm_sched_rq *rq = NULL;
+ unsigned int min_jobs = UINT_MAX;
+ int i;
+
+ for (i = 0; i < entity->num_rq_list; ++i) {
+ if (atomic_read(&entity->rq_list[i]->sched->num_jobs) <
+ min_jobs) {
+ min_jobs = atomic_read(
+ &entity->rq_list[i]->sched->num_jobs);
When you call atomic_read() twice you might get different results because
the atomic value can change in the meantime.
In other words you need to store the result locally:
unsigned int num_jobs = atomic_read(....);
if (num_jobs < min_jobs) {
min_jobs = num_jobs;
.....
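
For reference, the whole loop could then look something like this
(untested, just sketching the idea on top of your patch):

static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		/* Read the atomic only once so the comparison and the
		 * stored minimum use the same value.
		 */
		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}

The counter can of course still change after the read, but at least the
comparison and the minimum you keep are then based on the same snapshot.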
Christian.
+ rq = entity->rq_list[i];
+ }
+ }
+
+ return rq;
+}
+
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{