sdma_queue_count was only used to infer whether we should unmap SDMA
queues under HWS mode. In contrast, map_queues_cpsch() maps only active
queues rather than all of them. To make the unmap path match what was
actually mapped, count only active SDMA queues. Accordingly, rename
sdma_queue_count to active_sdma_queue_count to reflect the new usage.

Change-Id: I9f1c3305dad044a3c779ec0730fcf7554050de8b
Signed-off-by: Yong Zhao <Yong.Zhao@xxxxxxx>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 54 ++++++++-----------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  5 +-
 .../amd/amdkfd/kfd_process_queue_manager.c    | 16 +++---
 3 files changed, 31 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 064108cf493b..cf77b866054a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -138,6 +138,10 @@ void increment_queue_count(struct device_queue_manager *dqm,
 	dqm->active_queue_count++;
 	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count++;
+	else if (type == KFD_QUEUE_TYPE_SDMA)
+		dqm->active_sdma_queue_count++;
+	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->active_xgmi_sdma_queue_count++;
 }
 
 void decrement_queue_count(struct device_queue_manager *dqm,
@@ -146,6 +150,10 @@ void decrement_queue_count(struct device_queue_manager *dqm,
 	dqm->active_queue_count--;
 	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count--;
+	else if (type == KFD_QUEUE_TYPE_SDMA)
+		dqm->active_sdma_queue_count--;
+	else if (type == KFD_QUEUE_TYPE_SDMA_XGMI)
+		dqm->active_xgmi_sdma_queue_count--;
 }
 
 static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
@@ -377,11 +385,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	if (q->properties.is_active)
 		increment_queue_count(dqm, q->properties.type);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
 	 * type or whether the queue is active.
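
(Aside for reviewers: the counter bookkeeping above is easier to see outside
the diff. Below is a minimal, self-contained C sketch of the invariant this
patch relies on. struct dqm_counters, enum queue_type and sdma_unmap_needed()
are simplified stand-ins invented for illustration, not the kernel code; the
two helpers only mirror the shape of the real increment_queue_count() and
decrement_queue_count(). The point: per-type active counters change only when
a queue's active state changes, so an "any active SDMA queues?" test in the
unmap path matches exactly what map_queues_cpsch() submitted.)

#include <stdbool.h>
#include <stdio.h>

enum queue_type { QUEUE_TYPE_COMPUTE, QUEUE_TYPE_SDMA, QUEUE_TYPE_SDMA_XGMI };

struct dqm_counters {
	unsigned int active_queue_count;
	unsigned int active_cp_queue_count;
	unsigned int active_sdma_queue_count;
	unsigned int active_xgmi_sdma_queue_count;
};

/* Called only when a queue becomes active (e.g. on create or resume). */
static void increment_queue_count(struct dqm_counters *c, enum queue_type type)
{
	c->active_queue_count++;
	if (type == QUEUE_TYPE_COMPUTE)
		c->active_cp_queue_count++;
	else if (type == QUEUE_TYPE_SDMA)
		c->active_sdma_queue_count++;
	else if (type == QUEUE_TYPE_SDMA_XGMI)
		c->active_xgmi_sdma_queue_count++;
}

/* Called only when an active queue is destroyed or suspended. */
static void decrement_queue_count(struct dqm_counters *c, enum queue_type type)
{
	c->active_queue_count--;
	if (type == QUEUE_TYPE_COMPUTE)
		c->active_cp_queue_count--;
	else if (type == QUEUE_TYPE_SDMA)
		c->active_sdma_queue_count--;
	else if (type == QUEUE_TYPE_SDMA_XGMI)
		c->active_xgmi_sdma_queue_count--;
}

/* The unmap path only needs to touch SDMA engines when something was mapped. */
static bool sdma_unmap_needed(const struct dqm_counters *c)
{
	return c->active_sdma_queue_count > 0 ||
	       c->active_xgmi_sdma_queue_count > 0;
}

int main(void)
{
	struct dqm_counters c = {0};

	increment_queue_count(&c, QUEUE_TYPE_SDMA);	/* active SDMA queue mapped */
	printf("unmap SDMA? %d\n", sdma_unmap_needed(&c));	/* prints 1 */
	decrement_queue_count(&c, QUEUE_TYPE_SDMA);	/* queue destroyed */
	printf("unmap SDMA? %d\n", sdma_unmap_needed(&c));	/* prints 0 */
	return 0;
}
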
@@ -462,15 +465,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 			q->properties.type)];
 
-	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
 		deallocate_hqd(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	} else {
+	else {
 		pr_debug("q->properties.type %d is invalid\n",
 				q->properties.type);
 		return -EINVAL;
@@ -916,8 +917,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_sdma_queue_count = 0;
+	dqm->active_xgmi_sdma_queue_count = 0;
 
 	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
 		int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1081,8 +1082,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	mutex_init(&dqm->lock_hidden);
 	INIT_LIST_HEAD(&dqm->queues);
 	dqm->active_queue_count = dqm->processes_count = 0;
-	dqm->sdma_queue_count = 0;
-	dqm->xgmi_sdma_queue_count = 0;
+	dqm->active_sdma_queue_count = 0;
+	dqm->active_xgmi_sdma_queue_count = 0;
 	dqm->active_runlist = false;
 	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
 	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1254,11 +1255,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
-		dqm->sdma_queue_count++;
-	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
-		dqm->xgmi_sdma_queue_count++;
-
 	if (q->properties.is_active) {
 		increment_queue_count(dqm, q->properties.type);
 
@@ -1367,9 +1363,9 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 		return retval;
 
 	pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
-		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
+		dqm->active_sdma_queue_count, dqm->active_xgmi_sdma_queue_count);
 
-	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
+	if (dqm->active_sdma_queue_count > 0 || dqm->active_xgmi_sdma_queue_count)
 		unmap_sdma_queues(dqm);
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
@@ -1444,13 +1440,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
 	deallocate_doorbell(qpd, q);
 
-	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		dqm->sdma_queue_count--;
+	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		deallocate_sdma_queue(dqm, q);
-	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
-		dqm->xgmi_sdma_queue_count--;
+	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 		deallocate_sdma_queue(dqm, q);
-	}
 
 	list_del(&q->list);
 	qpd->queue_count--;
@@ -1673,13 +1666,10 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clear all user mode queues */
 	list_for_each_entry(q, &qpd->queues_list, list) {
-		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-			dqm->sdma_queue_count--;
+		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 			deallocate_sdma_queue(dqm, q);
-		} else if (q->properties.type ==
-			 KFD_QUEUE_TYPE_SDMA_XGMI) {
-			dqm->xgmi_sdma_queue_count--;
+		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
-		}
 
 		if (q->properties.is_active)
 			decrement_queue_count(dqm, q->properties.type);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 05e0afc04cd9..62472a186d4f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,8 +182,9 @@ struct device_queue_manager {
 	unsigned int		processes_count;
 	unsigned int		active_queue_count;
 	unsigned int		active_cp_queue_count;
-	unsigned int		sdma_queue_count;
-	unsigned int		xgmi_sdma_queue_count;
+	unsigned int		cp_queue_count;
+	unsigned int		active_sdma_queue_count;
+	unsigned int		active_xgmi_sdma_queue_count;
 	unsigned int		total_queue_count;
 	unsigned int		next_pipe_to_allocate;
 	unsigned int		*allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index c604a2ede3f5..941b5876f19f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,16 +241,12 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	switch (type) {
 	case KFD_QUEUE_TYPE_SDMA:
 	case KFD_QUEUE_TYPE_SDMA_XGMI:
-		if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
-			>= get_num_sdma_queues(dev->dqm)) ||
-			(type == KFD_QUEUE_TYPE_SDMA_XGMI &&
-			dev->dqm->xgmi_sdma_queue_count
-			>= get_num_xgmi_sdma_queues(dev->dqm))) {
-			pr_debug("Over-subscription is not allowed for SDMA.\n");
-			retval = -EPERM;
-			goto err_create_queue;
-		}
-
+		/* SDMA queues are always allocated statically no matter
+		 * which scheduler mode is used. We also do not need to
+		 * check whether a SDMA queue can be allocated here, because
+		 * allocate_sdma_queue() in create_queue() has the
+		 * corresponding check logic there.
+		 */
 		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
 		if (retval != 0)
 			goto err_create_queue;
-- 
2.17.1
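
P.S. for reviewers: the over-subscription check removed from
pqm_create_queue() is redundant because a bitmap-backed allocator simply runs
out of bits once every SDMA queue is in use. The fragment below is only a
rough, self-contained sketch of that idea; sdma_bitmap_alloc() and
sdma_bitmap_free() are hypothetical names invented here and are not the
actual allocate_sdma_queue()/deallocate_sdma_queue() implementations, which
operate on dqm->sdma_bitmap under the DQM lock.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Returns an allocated queue index, or -ENOMEM when every queue is in use,
 * which is what makes a separate over-subscription check unnecessary.
 */
static int sdma_bitmap_alloc(uint64_t *bitmap)
{
	int bit;

	if (*bitmap == 0)
		return -ENOMEM;		/* all SDMA queues already taken */

	bit = __builtin_ffsll((long long)*bitmap) - 1;	/* first free queue */
	*bitmap &= ~(1ULL << bit);	/* mark it as used */
	return bit;
}

static void sdma_bitmap_free(uint64_t *bitmap, int bit)
{
	*bitmap |= 1ULL << bit;		/* return the queue to the pool */
}

int main(void)
{
	uint64_t bitmap = 0x3;	/* pretend the device exposes two SDMA queues */
	int q0 = sdma_bitmap_alloc(&bitmap);
	int q1 = sdma_bitmap_alloc(&bitmap);
	int q2 = sdma_bitmap_alloc(&bitmap);	/* third request must fail */

	printf("q0=%d q1=%d q2=%d (q2 == -ENOMEM: %d)\n",
	       q0, q1, q2, q2 == -ENOMEM);
	sdma_bitmap_free(&bitmap, q0);
	return 0;
}
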