free_mqd() has to be called without the DQM lock held, but walking
qpd->queues_list with list_for_each_entry_safe() after the lock has been
dropped is racy: another thread may modify the list while we iterate over
it. Instead, pop the queues off the list one at a time under the lock and
release it only around each free_mqd() call, to avoid any list corruption.

Signed-off-by: xinhui pan <xinhui.pan@xxxxxxx>
---
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 22 ++++++++++---------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 63a9a19a3987..d62374746c93 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1722,7 +1722,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		struct qcm_process_device *qpd)
 {
 	int retval;
-	struct queue *q, *next;
+	struct queue *q;
 	struct kernel_queue *kq, *kq_next;
 	struct mqd_manager *mqd_mgr;
 	struct device_process_node *cur, *next_dpn;
@@ -1779,24 +1779,26 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		qpd->reset_wavefronts = false;
 	}
 
-	dqm_unlock(dqm);
-
-	/* Outside the DQM lock because under the DQM lock we can't do
-	 * reclaim or take other locks that others hold while reclaiming.
-	 */
-	if (found)
-		kfd_dec_compute_active(dqm->dev);
-
 	/* Lastly, free mqd resources.
 	 * Do free_mqd() after dqm_unlock to avoid circular locking.
 	 */
-	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+	while (!list_empty(&qpd->queues_list)) {
+		q = list_first_entry(&qpd->queues_list, struct queue, list);
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		list_del(&q->list);
 		qpd->queue_count--;
+		dqm_unlock(dqm);
 		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
+		dqm_lock(dqm);
 	}
+	dqm_unlock(dqm);
+
+	/* Outside the DQM lock because under the DQM lock we can't do
+	 * reclaim or take other locks that others hold while reclaiming.
+	 */
+	if (found)
+		kfd_dec_compute_active(dqm->dev);
 
 	return retval;
 }
-- 
2.25.1
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
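
As an aside, the drain pattern the patch switches to (pop one entry under
the lock, drop the lock only around the expensive per-entry work, re-read
the list head after retaking the lock) can be sketched as a standalone
userspace C program. Everything below is a hypothetical stand-in, not the
real KFD code: a pthread mutex plays the role of dqm_lock()/dqm_unlock(),
a hand-rolled doubly linked list plays the role of list_head, and free_mqd()
is just a placeholder for the expensive work that must run unlocked.

/* Minimal sketch of the drain-under-lock pattern (build: cc -pthread drain.c).
 * Pop one entry at a time under the lock, drop the lock only around the
 * per-entry work, and never cache a "next" pointer across the unlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int id;
	struct queue *prev, *next;	/* stand-in for struct list_head */
};

static struct queue *queues_head;	/* stand-in for qpd->queues_list */
static pthread_mutex_t dqm_lock_m = PTHREAD_MUTEX_INITIALIZER;

static void queue_list_add(struct queue *q)
{
	q->prev = NULL;
	q->next = queues_head;
	if (queues_head)
		queues_head->prev = q;
	queues_head = q;
}

static void queue_list_del(struct queue *q)
{
	if (q->prev)
		q->prev->next = q->next;
	else
		queues_head = q->next;
	if (q->next)
		q->next->prev = q->prev;
}

/* Stand-in for mqd_mgr->free_mqd(): must run without the lock held. */
static void free_mqd(struct queue *q)
{
	printf("freeing queue %d\n", q->id);
	free(q);
}

static void process_termination(void)
{
	struct queue *q;

	pthread_mutex_lock(&dqm_lock_m);
	while (queues_head) {
		/* Re-read the head on every iteration: the list may have
		 * been changed by others while the lock was dropped below.
		 */
		q = queues_head;
		queue_list_del(q);
		pthread_mutex_unlock(&dqm_lock_m);
		free_mqd(q);		/* expensive work, lock not held */
		pthread_mutex_lock(&dqm_lock_m);
	}
	pthread_mutex_unlock(&dqm_lock_m);
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct queue *q = malloc(sizeof(*q));
		q->id = i;
		queue_list_add(q);
	}
	process_termination();
	return 0;
}

The reason the while (!list_empty()) / list_first_entry() form is preferred
here over list_for_each_entry_safe() is that the safe iterator caches the
next pointer before the loop body runs; once the lock is dropped inside the
body, that cached pointer can be freed or re-linked by another thread,
whereas re-reading the head each iteration only dereferences state that was
observed with the lock held.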