The expectation is that the struct drm_device based logging helpers get
passed an actual struct drm_device pointer rather than some random
struct pointer where you can dereference the ->dev member.

Convert drm_err(sched, ...) to dev_err(sched->dev, ...) and similar.
This matches current usage, as no struct drm_device is available here,
but it does drop the "[drm]" or "[drm] *ERROR*" prefix from the log
messages.

Unfortunately, there's no dev_WARN_ON(), so the conversion is not
exactly the same.

Signed-off-by: Jani Nikula <jani.nikula@xxxxxxxxx>
---
Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
Cc: Danilo Krummrich <dakr@xxxxxxxxxx>
Cc: Philipp Stanner <phasta@xxxxxxxxxx>
Cc: "Christian König" <ckoenig.leichtzumerken@xxxxxxxxx>
Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
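
The old calls only compiled because drm_err() and friends merely
dereference ->dev on whatever pointer they are given. At the time of
writing, the helpers in include/drm/drm_print.h look roughly like the
following, which is also where the dropped "[drm]" / "[drm] *ERROR*"
prefixes come from:

	#define __drm_printk(drm, level, type, fmt, ...)		\
		dev_##level##type((drm) ? (drm)->dev : NULL,		\
				  "[drm] " fmt, ##__VA_ARGS__)

	#define drm_err(drm, fmt, ...)					\
		__drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__)

If keeping a device-prefixed warning for the drm_WARN_ON() case turns
out to matter, one option would be a dev_WARN_ON() helper built on top
of the existing dev_WARN(). A hypothetical sketch only, not something
this patch adds:

	/* hypothetical helper, not part of this patch */
	#define dev_WARN_ON(dev, condition) ({				\
		bool __ret_warn_on = !!(condition);			\
		if (unlikely(__ret_warn_on))				\
			dev_WARN(dev, "%s\n", #condition);		\
		unlikely(__ret_warn_on);				\
	})
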
---
 drivers/gpu/drm/scheduler/sched_entity.c |  2 +-
 drivers/gpu/drm/scheduler/sched_main.c   | 20 +++++++++++---------
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 69bcf0e99d57..e29af71d4b5c 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -92,7 +92,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	 * the lowest priority available.
 	 */
 	if (entity->priority >= sched_list[0]->num_rqs) {
-		drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
+		dev_err(sched_list[0]->dev, "entity with out-of-bounds priority:%u num_rqs:%u\n",
 			entity->priority, sched_list[0]->num_rqs);
 		entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
 					 (s32) DRM_SCHED_PRIORITY_KERNEL);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index a48be16ab84f..d1c1f22fd1db 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -103,9 +103,9 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 {
 	u32 credits;
 
-	drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
-					      atomic_read(&sched->credit_count),
-					      &credits));
+	WARN_ON(check_sub_overflow(sched->credit_limit,
+				   atomic_read(&sched->credit_count),
+				   &credits));
 
 	return credits;
 }
@@ -130,9 +130,11 @@ static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
 	/* If a job exceeds the credit limit, truncate it to the credit limit
 	 * itself to guarantee forward progress.
 	 */
-	if (drm_WARN(sched, s_job->credits > sched->credit_limit,
-		     "Jobs may not exceed the credit limit, truncate.\n"))
+	if (s_job->credits > sched->credit_limit) {
+		dev_WARN(sched->dev,
+			 "Jobs may not exceed the credit limit, truncate.\n");
 		s_job->credits = sched->credit_limit;
+	}
 
 	return drm_sched_available_credits(sched) >= s_job->credits;
 }
@@ -790,7 +792,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		 * or worse--a blank screen--leave a trail in the
 		 * logs, so this can be debugged easier.
 		 */
-		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+		dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__);
 		return -ENOENT;
 	}
 
@@ -1280,7 +1282,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
 		/* This is a gross violation--tell drivers what the problem is.
 		 */
-		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
+		dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
 			__func__);
 		return -EINVAL;
 	} else if (sched->sched_rq) {
@@ -1288,7 +1290,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		 * fine-tune their DRM calling order, and return all
 		 * is good.
 		 */
-		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
+		dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
 		return 0;
 	}
 
@@ -1343,7 +1345,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 Out_check_own:
 	if (sched->own_submit_wq)
 		destroy_workqueue(sched->submit_wq);
-	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
+	dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
 	return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_sched_init);
-- 
2.39.5