From: Ketil Johnsen <ketil.johnsen@xxxxxxx>

In the kernel submission flow, GROUP_SUBMIT will ensure that the group
gets assigned to a CSG. Conversely, for the user submission flow, work
gets added to the ring buffer without kernel supervision, so there needs
to be a mechanism to trigger rescheduling.

Use a new GROUP_KICK ioctl to keep it distinct from the existing submit
flow.

Signed-off-by: Ketil Johnsen <ketil.johnsen@xxxxxxx>
Signed-off-by: Mihail Atanassov <mihail.atanassov@xxxxxxx>
---
 drivers/gpu/drm/panthor/panthor_drv.c   | 12 +++++++++++
 drivers/gpu/drm/panthor/panthor_sched.c | 27 +++++++++++++++++++++++++
 drivers/gpu/drm/panthor/panthor_sched.h |  1 +
 3 files changed, 40 insertions(+)

diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index e391ab6aaab2..ce2fdcd3fb42 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -1305,6 +1305,17 @@ static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
 	return 0;
 }
 
+static int panthor_ioctl_group_kick(struct drm_device *ddev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_panthor_group_kick *args = data;
+	struct panthor_file *pfile = file->driver_priv;
+
+	panthor_sched_kick(pfile, args->handle, args->queue_mask);
+
+	return 0;
+}
+
 static int
 panthor_open(struct drm_device *ddev, struct drm_file *file)
 {
@@ -1375,6 +1386,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
 	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
 	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
 	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
+	PANTHOR_IOCTL(GROUP_KICK, group_kick, DRM_RENDER_ALLOW),
 };
 
 static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 471bb8f2b44c..3b56526a4b97 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -2845,6 +2845,33 @@ void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
 	}
 }
 
+void panthor_sched_kick(struct panthor_file *pfile, u32 group_handle, u32 queue_mask)
+{
+	struct panthor_group_pool *gpool = pfile->groups;
+	struct panthor_scheduler *sched = pfile->ptdev->scheduler;
+	struct panthor_group *group;
+
+	group = group_get(xa_load(&gpool->xa, group_handle));
+	if (!group)
+		return;
+
+	if (!group->queue_count)
+		goto err_put_group;
+
+	mutex_lock(&sched->lock);
+
+	if (group->csg_id < 0)
+		group_schedule_locked(group, queue_mask);
+	else
+		/* All queues share the same doorbell page (for now), so we just need to ring one */
+		gpu_write(pfile->ptdev, CSF_DOORBELL(group->queues[0]->doorbell_id), 1);
+
+	mutex_unlock(&sched->lock);
+
+err_put_group:
+	group_put(group);
+}
+
 static void group_sync_upd_work(struct work_struct *work)
 {
 	struct panthor_group *group =
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 0b3a2ee2a0ad..18fb7ad0952e 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -48,5 +48,6 @@ void panthor_sched_report_mmu_fault(struct panthor_device *ptdev);
 void panthor_sched_report_fw_events(struct panthor_device *ptdev, u32 events);
 
 u8 panthor_sched_doorbell_id(struct panthor_file *pfile, u32 group_handle);
+void panthor_sched_kick(struct panthor_file *pfile, u32 group_handle, u32 queue_mask);
 
 #endif
-- 
2.45.0
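
A minimal userspace sketch (not part of the patch) of how a user-mode driver
might use the new ioctl after appending work to a user-submitted queue's ring
buffer. It assumes the uapi additions from elsewhere in this series: a struct
drm_panthor_group_kick carrying a group handle and queue mask (the fields
dereferenced in panthor_ioctl_group_kick() above) and a
DRM_IOCTL_PANTHOR_GROUP_KICK request number in panthor_drm.h. Both names are
assumptions here, not taken from this diff.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/panthor_drm.h>

/* Kick one or more queues of a group after new work has been written to
 * their ring buffers. The kernel either rings the group's doorbell (group
 * already on a CSG slot) or asks the scheduler to schedule the group. */
static int panthor_group_kick(int drm_fd, uint32_t group_handle,
			      uint32_t queue_mask)
{
	/* Assumed uapi struct from this series; field names match the
	 * kernel handler above. */
	struct drm_panthor_group_kick args = {
		.handle = group_handle,
		.queue_mask = queue_mask,
	};

	/* DRM_IOCTL_PANTHOR_GROUP_KICK is assumed to be provided by the
	 * series' panthor_drm.h update. */
	return drmIoctl(drm_fd, DRM_IOCTL_PANTHOR_GROUP_KICK, &args);
}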