[PATCH 08/19] drm/amdkfd: Fix goto usage

On Sat, Aug 12, 2017 at 12:56 AM, Felix Kuehling <Felix.Kuehling at amd.com> wrote:
> From: Kent Russell <kent.russell at amd.com>
>
> Remove gotos that do not feature any common cleanup, and use gotos
> instead of repeating cleanup commands.
>
> According to kernel.org: "The goto statement comes in handy when a
> function exits from multiple locations and some common work such as
> cleanup has to be done. If there is no cleanup needed then just return
> directly."
>
> Signed-off-by: Kent Russell <kent.russell at amd.com>
> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c           |  15 +--
>  .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c  | 102 ++++++++++-----------
>  drivers/gpu/drm/amd/amdkfd/kfd_module.c            |   3 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c    |  14 +--
>  .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c |  14 +--
>  5 files changed, 65 insertions(+), 83 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index c22401e..7d78119 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -460,9 +460,8 @@ static int kfd_ioctl_dbg_register(struct file *filep,
>          */
>         pdd = kfd_bind_process_to_device(dev, p);
>         if (IS_ERR(pdd)) {
> -               mutex_unlock(kfd_get_dbgmgr_mutex());
> -               mutex_unlock(&p->mutex);
> -               return PTR_ERR(pdd);
> +               status = PTR_ERR(pdd);
> +               goto out;
>         }
>
>         if (!dev->dbgmgr) {
> @@ -480,6 +479,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
>                 status = -EINVAL;
>         }
>
> +out:
>         mutex_unlock(kfd_get_dbgmgr_mutex());
>         mutex_unlock(&p->mutex);
>
> @@ -580,8 +580,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
>         args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
>
>         if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
> -               kfree(args_buff);
> -               return -EINVAL;
> +               status = -EINVAL;
> +               goto out;
>         }
>
>         watch_mask_value = (uint64_t) args_buff[args_idx];
> @@ -604,8 +604,8 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
>         }
>
>         if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
> -               kfree(args_buff);
> -               return -EINVAL;
> +               status = -EINVAL;
> +               goto out;
>         }
>
>         /* Currently HSA Event is not supported for DBG */
> @@ -617,6 +617,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
>
>         mutex_unlock(kfd_get_dbgmgr_mutex());
>
> +out:
>         kfree(args_buff);
>
>         return status;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index df93531..2003a7e 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -150,7 +150,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
>                                 struct qcm_process_device *qpd,
>                                 int *allocated_vmid)
>  {
> -       int retval;
> +       int retval = 0;
This is redundant. retval will *always* be initialized by the call to the
create_*_queue_nocpsch() function below.

>
>         BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
>
> @@ -161,23 +161,21 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
>         if (dqm->total_queue_count >= max_num_of_queues_per_device) {
>                 pr_warn("Can't create new usermode queue because %d queues were already created\n",
>                                 dqm->total_queue_count);
> -               mutex_unlock(&dqm->lock);
> -               return -EPERM;
> +               retval = -EPERM;
> +               goto out_unlock;
>         }
>
>         if (list_empty(&qpd->queues_list)) {
>                 retval = allocate_vmid(dqm, qpd, q);
> -               if (retval) {
> -                       mutex_unlock(&dqm->lock);
> -                       return retval;
> -               }
> +               if (retval)
> +                       goto out_unlock;
>         }
>         *allocated_vmid = qpd->vmid;
>         q->properties.vmid = qpd->vmid;
>
>         if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
>                 retval = create_compute_queue_nocpsch(dqm, q, qpd);
> -       if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
> +       else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
>                 retval = create_sdma_queue_nocpsch(dqm, q, qpd);
Just for completeness (although it should never occur), I think we should add:

else
      retval = -EINVAL;

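So the dispatch would read roughly like this -- a sketch assembled from the hunk
above plus the suggested else; out_unlock is the label this patch adds at the
end of the function:

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;	/* defensive default for an unexpected type */

	if (retval) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		goto out_unlock;
	}

With the else in place every path assigns retval before it is read, which is
also why the explicit "= 0" initialization above isn't needed.
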
>
>         if (retval) {
> @@ -185,8 +183,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
>                         deallocate_vmid(dqm, qpd, q);
>                         *allocated_vmid = 0;
>                 }
> -               mutex_unlock(&dqm->lock);
> -               return retval;
> +               goto out_unlock;
>         }
>
>         list_add(&q->list, &qpd->queues_list);
> @@ -204,8 +201,9 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
>         pr_debug("Total of %d queues are accountable so far\n",
>                         dqm->total_queue_count);
>
> +out_unlock:
>         mutex_unlock(&dqm->lock);
> -       return 0;
> +       return retval;
>  }
>
>  static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
> @@ -271,23 +269,25 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
>
>         retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
>                                 &q->gart_mqd_addr, &q->properties);
> -       if (retval) {
> -               deallocate_hqd(dqm, q);
> -               return retval;
> -       }
> +       if (retval)
> +               goto out_deallocate_hqd;
>
>         pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
>                         q->pipe, q->queue);
>
>         retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
>                         q->queue, (uint32_t __user *) q->properties.write_ptr);
> -       if (retval) {
> -               deallocate_hqd(dqm, q);
> -               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
> -               return retval;
> -       }
> +       if (retval)
> +               goto out_uninit_mqd;
>
>         return 0;
> +
> +out_uninit_mqd:
> +       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
> +out_deallocate_hqd:
> +       deallocate_hqd(dqm, q);
> +
> +       return retval;
>  }
>
>  static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
> @@ -366,8 +366,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
>         mqd = dqm->ops.get_mqd_manager(dqm,
>                         get_mqd_type_from_queue_type(q->properties.type));
>         if (!mqd) {
> -               mutex_unlock(&dqm->lock);
> -               return -ENOMEM;
> +               retval = -ENOMEM;
> +               goto out_unlock;
>         }
>
>         if (q->properties.is_active)
> @@ -387,6 +387,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
>         if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
>                 retval = execute_queues_cpsch(dqm, false);
>
> +out_unlock:
>         mutex_unlock(&dqm->lock);
>         return retval;
>  }
> @@ -500,16 +501,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
>
>         pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
>
> +       dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
> +                                       sizeof(unsigned int), GFP_KERNEL);
> +       if (!dqm->allocated_queues)
> +               return -ENOMEM;
> +
>         mutex_init(&dqm->lock);
>         INIT_LIST_HEAD(&dqm->queues);
>         dqm->queue_count = dqm->next_pipe_to_allocate = 0;
>         dqm->sdma_queue_count = 0;
> -       dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
> -                                       sizeof(unsigned int), GFP_KERNEL);
> -       if (!dqm->allocated_queues) {
> -               mutex_destroy(&dqm->lock);
> -               return -ENOMEM;
> -       }
>
>         for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
>                 int pipe_offset = pipe * get_queues_per_pipe(dqm);
> @@ -602,20 +602,22 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
>         dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
>         retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
>                                 &q->gart_mqd_addr, &q->properties);
> -       if (retval) {
> -               deallocate_sdma_queue(dqm, q->sdma_id);
> -               return retval;
> -       }
> +       if (retval)
> +               goto out_deallocate_sdma_queue;
>
>         retval = mqd->load_mqd(mqd, q->mqd, 0,
>                                 0, NULL);
> -       if (retval) {
> -               deallocate_sdma_queue(dqm, q->sdma_id);
> -               mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
> -               return retval;
> -       }
> +       if (retval)
> +               goto out_uninit_mqd;
>
>         return 0;
> +
> +out_uninit_mqd:
> +       mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
> +out_deallocate_sdma_queue:
> +       deallocate_sdma_queue(dqm, q->sdma_id);
> +
> +       return retval;
>  }
>
>  /*
> @@ -681,12 +683,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
>         dqm->active_runlist = false;
>         retval = dqm->ops_asic_specific.initialize(dqm);
>         if (retval)
> -               goto fail_init_pipelines;
> -
> -       return 0;
> +               mutex_destroy(&dqm->lock);
>
> -fail_init_pipelines:
> -       mutex_destroy(&dqm->lock);
>         return retval;
>  }
>
> @@ -846,8 +844,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
>                         get_mqd_type_from_queue_type(q->properties.type));
>
>         if (!mqd) {
> -               mutex_unlock(&dqm->lock);
> -               return -ENOMEM;
> +               retval = -ENOMEM;
> +               goto out;
>         }
>
>         dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
> @@ -1097,14 +1095,11 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
>                 uint64_t base = (uintptr_t)alternate_aperture_base;
>                 uint64_t limit = base + alternate_aperture_size - 1;
>
> -               if (limit <= base)
> -                       goto out;
> -
> -               if ((base & APE1_FIXED_BITS_MASK) != 0)
> -                       goto out;
> -
> -               if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
> +               if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
> +                  (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
> +                       retval = false;
>                         goto out;
> +               }
>
>                 qpd->sh_mem_ape1_base = base >> 16;
>                 qpd->sh_mem_ape1_limit = limit >> 16;
> @@ -1125,12 +1120,9 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
>                 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
>                 qpd->sh_mem_ape1_limit);
>
> -       mutex_unlock(&dqm->lock);
> -       return retval;
> -
>  out:
>         mutex_unlock(&dqm->lock);
> -       return false;
> +       return retval;
>  }
>
>  struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
> index 819a442..0d73bea 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
> @@ -105,7 +105,7 @@ static int __init kfd_module_init(void)
>
>         err = kfd_pasid_init();
>         if (err < 0)
> -               goto err_pasid;
> +               return err;
>
>         err = kfd_chardev_init();
>         if (err < 0)
> @@ -127,7 +127,6 @@ static int __init kfd_module_init(void)
>         kfd_chardev_exit();
>  err_ioctl:
>         kfd_pasid_exit();
> -err_pasid:
>         return err;
>  }
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
> index f3b8cc8..c4030b3 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
> @@ -442,6 +442,7 @@ int pm_send_set_resources(struct packet_manager *pm,
>                                 struct scheduling_resources *res)
>  {
>         struct pm4_set_resources *packet;
> +       int retval = 0;
>
>         BUG_ON(!pm || !res);
>
> @@ -450,9 +451,9 @@ int pm_send_set_resources(struct packet_manager *pm,
>                                         sizeof(*packet) / sizeof(uint32_t),
>                                         (unsigned int **)&packet);
>         if (!packet) {
> -               mutex_unlock(&pm->lock);
>                 pr_err("Failed to allocate buffer on kernel queue\n");
> -               return -ENOMEM;
> +               retval = -ENOMEM;
> +               goto out;
>         }
>
>         memset(packet, 0, sizeof(struct pm4_set_resources));
> @@ -475,9 +476,10 @@ int pm_send_set_resources(struct packet_manager *pm,
>
>         pm->priv_queue->ops.submit_packet(pm->priv_queue);
>
> +out:
>         mutex_unlock(&pm->lock);
>
> -       return 0;
> +       return retval;
>  }
>
>  int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
> @@ -555,9 +557,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
>         packet->data_lo = lower_32_bits((uint64_t)fence_value);
>
>         pm->priv_queue->ops.submit_packet(pm->priv_queue);
> -       mutex_unlock(&pm->lock);
> -
> -       return 0;
>
>  fail_acquire_packet_buffer:
>         mutex_unlock(&pm->lock);
> @@ -639,9 +638,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
>
>         pm->priv_queue->ops.submit_packet(pm->priv_queue);
>
> -       mutex_unlock(&pm->lock);
> -       return 0;
> -
>  err_acquire_packet_buffer:
>         mutex_unlock(&pm->lock);
>         return retval;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index d4f8bae..8432f5f 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -35,9 +35,8 @@ static inline struct process_queue_node *get_queue_by_qid(
>         BUG_ON(!pqm);
>
>         list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
> -               if (pqn->q && pqn->q->properties.queue_id == qid)
> -                       return pqn;
> -               if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
> +               if ((pqn->q && pqn->q->properties.queue_id == qid) ||
> +                   (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
>                         return pqn;
>         }
>
> @@ -113,8 +112,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
>  {
>         int retval;
>
> -       retval = 0;
> -
>         /* Doorbell initialized in user space*/
>         q_properties->doorbell_ptr = NULL;
>
> @@ -127,7 +124,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
>
>         retval = init_queue(q, q_properties);
>         if (retval != 0)
> -               goto err_init_queue;
> +               return retval;
>
>         (*q)->device = dev;
>         (*q)->process = pqm->process;
> @@ -135,9 +132,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
>         pr_debug("PQM After init queue");
>
>         return retval;
> -
> -err_init_queue:
> -       return retval;
>  }
>
>  int pqm_create_queue(struct process_queue_manager *pqm,
> @@ -181,7 +175,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
>                 list_for_each_entry(cur, &pdd->qpd.queues_list, list)
>                         num_queues++;
>                 if (num_queues >= dev->device_info->max_no_of_hqd/2)
> -                       return (-ENOSPC);
> +                       return -ENOSPC;
>         }
>
>         retval = find_available_queue_slot(pqm, qid);
> --
> 2.7.4
>

Excellent cleanup!
With the above comments addressed, this patch is:
Reviewed-by: Oded Gabbay <oded.gabbay at gmail.com>

