Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@xxxxxxxxxxxxxxx>

On 1/7/2025 6:32 PM, Maciej Falkowski wrote:
> From: Karol Wachowski <karol.wachowski@xxxxxxxxx>
>
> Control the explicit command queue management capability bit based on
> the scheduling mode. The capability is available only when hardware
> scheduling mode is set.
>
> There is no point in allowing user space to create and destroy command
> queues in OS scheduling mode, because the FW does not support all the
> functionality required for correct command queue management with OS
> scheduling.
>
> Return -ENODEV from the command queue create/destroy/submit IOCTLs.
>
> Remove the is_valid field from struct ivpu_cmdq.
>
> Signed-off-by: Karol Wachowski <karol.wachowski@xxxxxxxxx>
> Signed-off-by: Maciej Falkowski <maciej.falkowski@xxxxxxxxxxxxxxx>
> ---
>  drivers/accel/ivpu/ivpu_drv.c | 19 +++++++------------
>  drivers/accel/ivpu/ivpu_drv.h |  1 +
>  drivers/accel/ivpu/ivpu_job.c | 24 +++++++++++-------------
>  drivers/accel/ivpu/ivpu_job.h |  1 -
>  4 files changed, 19 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
> index 9b0d99873fb3..6a80d626d609 100644
> --- a/drivers/accel/ivpu/ivpu_drv.c
> +++ b/drivers/accel/ivpu/ivpu_drv.c
> @@ -127,23 +127,18 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link)
>          kref_put(&file_priv->ref, file_priv_release);
>  }
>
> -static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
> +bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
>  {
> -        switch (args->index) {
> +        switch (capability) {
>          case DRM_IVPU_CAP_METRIC_STREAMER:
> -                args->value = 1;
> -                break;
> +                return true;
>          case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
> -                args->value = 1;
> -                break;
> +                return true;
>          case DRM_IVPU_CAP_MANAGE_CMDQ:
> -                args->value = 1;
> -                break;
> +                return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
>          default:
> -                return -EINVAL;
> +                return false;
>          }
> -
> -        return 0;
>  }
>
>  static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> @@ -203,7 +198,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
>                  args->value = vdev->hw->sku;
>                  break;
>          case DRM_IVPU_PARAM_CAPABILITIES:
> -                ret = ivpu_get_capabilities(vdev, args);
> +                args->value = ivpu_is_capable(vdev, args->index);
>                  break;
>          default:
>                  ret = -EINVAL;
> diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
> index b57d878f2fcd..d53902b34070 100644
> --- a/drivers/accel/ivpu/ivpu_drv.h
> +++ b/drivers/accel/ivpu/ivpu_drv.h
> @@ -213,6 +213,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
>  int ivpu_boot(struct ivpu_device *vdev);
>  int ivpu_shutdown(struct ivpu_device *vdev);
>  void ivpu_prepare_for_reset(struct ivpu_device *vdev);
> +bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability);
>
>  static inline u8 ivpu_revision(struct ivpu_device *vdev)
>  {
> diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
> index c55de9736d84..37ea92eb4b25 100644
> --- a/drivers/accel/ivpu/ivpu_job.c
> +++ b/drivers/accel/ivpu/ivpu_job.c
> @@ -123,7 +123,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
>
>          cmdq->priority = priority;
>          cmdq->is_legacy = is_legacy;
> -        cmdq->is_valid = true;
>
>          ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
>                                &file_priv->cmdq_id_next, GFP_KERNEL);
> @@ -307,7 +306,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
>          lockdep_assert_held(&file_priv->lock);
>
>          cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
> -        if (!cmdq || !cmdq->is_valid) {
> +        if (!cmdq) {
>                  ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
>                  return NULL;
>          }
> @@ -832,6 +831,9 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
>          struct ivpu_file_priv *file_priv = file->driver_priv;
>          struct drm_ivpu_cmdq_submit *args = data;
>
> +        if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
> +                return -ENODEV;
> +
>          if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
>                  return -EINVAL;
>
> @@ -857,6 +859,9 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
>          struct drm_ivpu_cmdq_create *args = data;
>          struct ivpu_cmdq *cmdq;
>
> +        if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
> +                return -ENODEV;
> +
>          if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
>                  return -EINVAL;
>
> @@ -880,24 +885,17 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
>          u32 cmdq_id;
>          int ret = 0;
>
> +        if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
> +                return -ENODEV;
> +
>          mutex_lock(&file_priv->lock);
>
>          cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
> -        if (!cmdq || !cmdq->is_valid || cmdq->is_legacy) {
> +        if (!cmdq || cmdq->is_legacy) {
>                  ret = -ENOENT;
>                  goto unlock;
>          }
>
> -        /*
> -         * There is no way to stop executing jobs per command queue
> -         * in OS scheduling mode, mark command queue as invalid instead
> -         * and it will be freed together with context release.
> -         */
> -        if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_OS) {
> -                cmdq->is_valid = false;
> -                goto unlock;
> -        }
> -
>          cmdq_id = cmdq->id;
>          ivpu_cmdq_destroy(file_priv, cmdq);
>          ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
> diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
> index ff77ee1fcee2..2e301c2eea7b 100644
> --- a/drivers/accel/ivpu/ivpu_job.h
> +++ b/drivers/accel/ivpu/ivpu_job.h
> @@ -31,7 +31,6 @@ struct ivpu_cmdq {
>          u32 id;
>          u32 db_id;
>          u8 priority;
> -        bool is_valid;
>          bool is_legacy;
>  };
>
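
A side note for user-space integrators: after this change a client should probe the
capability before using the explicit command queue IOCTLs and be prepared for -ENODEV
on OS-scheduling firmware. A minimal sketch of such a probe follows (illustrative only,
not part of this patch; it assumes the existing DRM_IOCTL_IVPU_GET_PARAM uapi plus the
DRM_IVPU_CAP_MANAGE_CMDQ index added by this series, and the device node path is just
an example):

/* Illustrative user-space sketch, not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/ivpu_accel.h>   /* uapi header providing DRM_IOCTL_IVPU_GET_PARAM etc. */

static int ivpu_has_cmdq_management(int fd)
{
        struct drm_ivpu_param param = {
                .param = DRM_IVPU_PARAM_CAPABILITIES,
                .index = DRM_IVPU_CAP_MANAGE_CMDQ,
        };

        /* Query the capability; treat any ioctl failure as "not capable". */
        if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param))
                return 0;

        return param.value != 0;
}

int main(void)
{
        int fd = open("/dev/accel/accel0", O_RDWR); /* example accel node */

        if (fd < 0)
                return 1;

        if (ivpu_has_cmdq_management(fd))
                printf("HW scheduler: explicit command queue IOCTLs are available\n");
        else
                printf("OS scheduler: CMDQ create/destroy/submit will return -ENODEV\n");

        close(fd);
        return 0;
}

Reporting this through the generic CAPABILITIES param keeps the uapi unchanged while
letting the driver gate the feature on the scheduler actually in use.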