> Thanks Felix.
> I'll add it to the next tree instead of the one from the patch-set.
>

Oded, I based this patch on your current amdkfd-next. Do you have another
"next tree"?

Regards,
  Felix

-----Original Message-----
From: Oded Gabbay [mailto:oded.gabbay@xxxxxxxxx]
Sent: Wednesday, October 18, 2017 12:36 AM
To: Kuehling, Felix
Cc: amd-gfx list
Subject: Re: [PATCH] drm/amdkfd: Make queue mapping interfaces more consistent

On Wed, Oct 18, 2017 at 5:55 AM, Felix Kuehling <Felix.Kuehling at amd.com> wrote:
> Pass unmap filter parameters directly to execute_queues_cpsch, same as
> unmap_queues_cpsch.
>
> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
> ---
>  .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 32 +++++++++++-----------
>  1 file changed, 16 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 933adb5..da3b743 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -45,7 +45,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
>                                         struct qcm_process_device *qpd);
>
>  static int execute_queues_cpsch(struct device_queue_manager *dqm,
> -                               bool static_queues_included);
> +                               enum kfd_unmap_queues_filter filter,
> +                               uint32_t filter_param);
>  static int unmap_queues_cpsch(struct device_queue_manager *dqm,
>                                 enum kfd_unmap_queues_filter filter,
>                                 uint32_t filter_param);
> @@ -741,7 +742,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
>         init_interrupts(dqm);
>
>         mutex_lock(&dqm->lock);
> -       execute_queues_cpsch(dqm, false);
> +       execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>         mutex_unlock(&dqm->lock);
>
>         return 0;
> @@ -787,7 +788,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
>         list_add(&kq->list, &qpd->priv_queue_list);
>         dqm->queue_count++;
>         qpd->is_debug = true;
> -       execute_queues_cpsch(dqm, false);
> +       execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>         mutex_unlock(&dqm->lock);
>
>         return 0;
> @@ -801,7 +802,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
>         list_del(&kq->list);
>         dqm->queue_count--;
>         qpd->is_debug = false;
> -       execute_queues_cpsch(dqm, true);
> +       execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
>         /*
>          * Unconditionally decrement this counter, regardless of the queue's
>          * type.
> @@ -859,7 +860,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
>         qpd->queue_count++;
>         if (q->properties.is_active) {
>                 dqm->queue_count++;
> -               retval = execute_queues_cpsch(dqm, false);
> +               retval = execute_queues_cpsch(dqm,
> +                               KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>         }
>
>         if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
> @@ -964,16 +966,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
>
>  /* dqm->lock mutex has to be locked before calling this function */
>  static int execute_queues_cpsch(struct device_queue_manager *dqm,
> -                               bool static_queues_included)
> +                               enum kfd_unmap_queues_filter filter,
> +                               uint32_t filter_param)
>  {
>         int retval;
> -       enum kfd_unmap_queues_filter filter;
> -
> -       filter = static_queues_included ?
> -                       KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
> -                       KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
>
> -       retval = unmap_queues_cpsch(dqm, filter, 0);
> +       retval = unmap_queues_cpsch(dqm, filter, filter_param);
>         if (retval) {
>                 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
>                 return retval;
> @@ -1024,7 +1022,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
>         if (q->properties.is_active)
>                 dqm->queue_count--;
>
> -       retval = execute_queues_cpsch(dqm, false);
> +       retval = execute_queues_cpsch(dqm,
> +                               KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
>         if (retval == -ETIME)
>                 qpd->reset_wavefronts = true;
>
> @@ -1157,7 +1156,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
>         struct kernel_queue *kq, *kq_next;
>         struct mqd_manager *mqd;
>         struct device_process_node *cur, *next_dpn;
> -       bool unmap_static_queues = false;
> +       enum kfd_unmap_queues_filter filter =
> +               KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
>
>         retval = 0;
>
> @@ -1169,7 +1169,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
>                 dqm->queue_count--;
>                 qpd->is_debug = false;
>                 dqm->total_queue_count--;
> -               unmap_static_queues = true;
> +               filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
>         }
>
>         /* Clear all user mode queues */
> @@ -1193,7 +1193,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
>                 }
>         }
>
> -       retval = execute_queues_cpsch(dqm, unmap_static_queues);
> +       retval = execute_queues_cpsch(dqm, filter, 0);
>         if (retval || qpd->reset_wavefronts) {
>                 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
>                 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
> --
> 2.7.4
>

Thanks Felix.
I'll add it to the next tree instead of the one from the patch-set.

Oded
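For readers skimming the thread, the gist of the interface change is that callers of execute_queues_cpsch() now name the unmap filter and its parameter explicitly, instead of passing a bool from which the filter was derived. Below is a minimal, self-contained userspace sketch of that calling convention; it is not the kernel code. The stub bodies, the main() driver, and the simplified signatures (the dqm argument is dropped) are purely illustrative; only the enum values and the shape of the calls follow the patch.

#include <stdint.h>
#include <stdio.h>

/* Filter values mirroring the kfd_unmap_queues_filter enum referenced in the patch. */
enum kfd_unmap_queues_filter {
        KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
        KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
};

/* Stub standing in for unmap_queues_cpsch(); the real function preempts queues
 * on the hardware scheduler and also takes the device_queue_manager. */
static int unmap_queues_cpsch(enum kfd_unmap_queues_filter filter,
                              uint32_t filter_param)
{
        printf("unmap: filter=%d param=%u\n", (int)filter, (unsigned)filter_param);
        return 0;
}

/* After the patch, execute_queues_cpsch() forwards the caller's filter and
 * parameter directly instead of deriving the filter from a bool argument. */
static int execute_queues_cpsch(enum kfd_unmap_queues_filter filter,
                                uint32_t filter_param)
{
        return unmap_queues_cpsch(filter, filter_param);
}

int main(void)
{
        /* Callers that used to pass false/true now name the filter explicitly. */
        execute_queues_cpsch(KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        execute_queues_cpsch(KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        return 0;
}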