RE: [PATCH RFC v3 3/3] ufs: Simplify the clock scaling mechanism implementation

A small nit:
ufshcd_scsi_{block,unblock}_requests() no longer use scsi_{block,unblock}_requests(),
so maybe just drop the _scsi_ part of the names.
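Something like this, perhaps (untested sketch, just to illustrate the naming;
the bodies would stay exactly as in your patch):

	static void ufshcd_unblock_requests(struct ufs_hba *hba);
	static int ufshcd_block_requests(struct ufs_hba *hba,
					 unsigned long timeout);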

I think that you can add Bean's Tested-by to this patch as well, because clock
scaling happens all the time, as opposed to, e.g., task management, which is
hard to reproduce on some platforms.

Thanks,
Avri

> 
> 
> Scaling the clock is only safe while no commands are in progress. Use
> blk_mq_{un,}freeze_queue() to block submission of new commands and to wait
> for ongoing commands to complete.
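
[ For context: blk_mq_freeze_queue(q) is blk_freeze_queue_start(q), which
  blocks new submissions, followed by an indefinite wait for in-flight
  requests to drain. The timed variant used below follows roughly this
  pattern (illustrative sketch only, q being any struct request_queue *):

	blk_freeze_queue_start(q);	/* block new submissions */
	if (blk_mq_freeze_queue_wait_timeout(q, HZ) <= 0) {
		/* in-flight requests did not drain within 1 second */
		blk_mq_unfreeze_queue(q);	/* back out */
		return -ETIMEDOUT;
	}
]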
> 
> Cc: Yaniv Gardi <ygardi@xxxxxxxxxxxxxx>
> Cc: Stanley Chu <stanley.chu@xxxxxxxxxxxx>
> Cc: Avri Altman <avri.altman@xxxxxxx>
> Cc: Tomas Winkler <tomas.winkler@xxxxxxxxx>
> Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
> ---
>  drivers/scsi/ufs/ufshcd.c | 134 ++++++++++++--------------------------
>  drivers/scsi/ufs/ufshcd.h |   3 -
>  2 files changed, 43 insertions(+), 94 deletions(-)
> 
> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
> index 2c6300fd5c75..d1912ba5d2d9 100644
> --- a/drivers/scsi/ufs/ufshcd.c
> +++ b/drivers/scsi/ufs/ufshcd.c
> @@ -292,14 +292,49 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
> 
>  static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
>  {
> -       if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
> -               scsi_unblock_requests(hba->host);
> +       struct scsi_device *sdev;
> +
> +       blk_mq_unfreeze_queue(hba->tmf_queue);
> +       blk_mq_unfreeze_queue(hba->cmd_queue);
> +       shost_for_each_device(sdev, hba->host)
> +               blk_mq_unfreeze_queue(sdev->request_queue);
>  }
> 
> -static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
> +static int ufshcd_scsi_block_requests(struct ufs_hba *hba,
> +                                     unsigned long timeout)
>  {
> -       if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
> -               scsi_block_requests(hba->host);
> +       struct scsi_device *sdev;
> +       unsigned long deadline = jiffies + timeout;
> +
> +       if (timeout == ULONG_MAX) {
> +               shost_for_each_device(sdev, hba->host)
> +                       blk_mq_freeze_queue(sdev->request_queue);
> +               blk_mq_freeze_queue(hba->cmd_queue);
> +               blk_mq_freeze_queue(hba->tmf_queue);
> +               return 0;
> +       }
> +
> +       shost_for_each_device(sdev, hba->host)
> +               blk_freeze_queue_start(sdev->request_queue);
> +       blk_freeze_queue_start(hba->cmd_queue);
> +       blk_freeze_queue_start(hba->tmf_queue);
> +       shost_for_each_device(sdev, hba->host) {
> +               if (blk_mq_freeze_queue_wait_timeout(sdev->request_queue,
> +                               max_t(long, 0, deadline - jiffies)) <= 0) {
> +                       goto err;
> +               }
> +       }
> +       if (blk_mq_freeze_queue_wait_timeout(hba->cmd_queue,
> +                               max_t(long, 0, deadline - jiffies)) <= 0)
> +               goto err;
> +       if (blk_mq_freeze_queue_wait_timeout(hba->tmf_queue,
> +                               max_t(long, 0, deadline - jiffies)) <= 0)
> +               goto err;
> +       return 0;
> +
> +err:
> +       ufshcd_scsi_unblock_requests(hba);
> +       return -ETIMEDOUT;
>  }
> 
>  static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
> @@ -971,65 +1006,6 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
>         return false;
>  }
> 
> -static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
> -                                       u64 wait_timeout_us)
> -{
> -       unsigned long flags;
> -       int ret = 0;
> -       u32 tm_doorbell;
> -       u32 tr_doorbell;
> -       bool timeout = false, do_last_check = false;
> -       ktime_t start;
> -
> -       ufshcd_hold(hba, false);
> -       spin_lock_irqsave(hba->host->host_lock, flags);
> -       /*
> -        * Wait for all the outstanding tasks/transfer requests.
> -        * Verify by checking the doorbell registers are clear.
> -        */
> -       start = ktime_get();
> -       do {
> -               if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
> -                       ret = -EBUSY;
> -                       goto out;
> -               }
> -
> -               tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
> -               tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
> -               if (!tm_doorbell && !tr_doorbell) {
> -                       timeout = false;
> -                       break;
> -               } else if (do_last_check) {
> -                       break;
> -               }
> -
> -               spin_unlock_irqrestore(hba->host->host_lock, flags);
> -               schedule();
> -               if (ktime_to_us(ktime_sub(ktime_get(), start)) >
> -                   wait_timeout_us) {
> -                       timeout = true;
> -                       /*
> -                        * We might have scheduled out for long time so make
> -                        * sure to check if doorbells are cleared by this time
> -                        * or not.
> -                        */
> -                       do_last_check = true;
> -               }
> -               spin_lock_irqsave(hba->host->host_lock, flags);
> -       } while (tm_doorbell || tr_doorbell);
> -
> -       if (timeout) {
> -               dev_err(hba->dev,
> -                       "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
> -                       __func__, tm_doorbell, tr_doorbell);
> -               ret = -EBUSY;
> -       }
> -out:
> -       spin_unlock_irqrestore(hba->host->host_lock, flags);
> -       ufshcd_release(hba);
> -       return ret;
> -}
> -
>  /**
>   * ufshcd_scale_gear - scale up/down UFS gear
>   * @hba: per adapter instance
> @@ -1079,26 +1055,15 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
> 
>  static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
>  {
> -       #define DOORBELL_CLR_TOUT_US            (1000 * 1000) /* 1 sec */
> -       int ret = 0;
>         /*
>          * make sure that there are no outstanding requests when
>          * clock scaling is in progress
>          */
> -       ufshcd_scsi_block_requests(hba);
> -       down_write(&hba->clk_scaling_lock);
> -       if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
> -               ret = -EBUSY;
> -               up_write(&hba->clk_scaling_lock);
> -               ufshcd_scsi_unblock_requests(hba);
> -       }
> -
> -       return ret;
> +       return ufshcd_scsi_block_requests(hba, HZ);
>  }
> 
>  static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
>  {
> -       up_write(&hba->clk_scaling_lock);
>         ufshcd_scsi_unblock_requests(hba);
>  }
> 
> @@ -1528,7 +1493,7 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
>                  */
>                 /* fallthrough */
>         case CLKS_OFF:
> -               ufshcd_scsi_block_requests(hba);
> +               ufshcd_scsi_block_requests(hba, ULONG_MAX);
>                 hba->clk_gating.state = REQ_CLKS_ON;
>                 trace_ufshcd_clk_gating(dev_name(hba->dev),
>                                         hba->clk_gating.state);
> @@ -2395,9 +2360,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
>                 BUG();
>         }
> 
> -       if (!down_read_trylock(&hba->clk_scaling_lock))
> -               return SCSI_MLQUEUE_HOST_BUSY;
> -
>         spin_lock_irqsave(hba->host->host_lock, flags);
>         switch (hba->ufshcd_state) {
>         case UFSHCD_STATE_OPERATIONAL:
> @@ -2462,7 +2424,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
>  out_unlock:
>         spin_unlock_irqrestore(hba->host->host_lock, flags);
>  out:
> -       up_read(&hba->clk_scaling_lock);
>         return err;
>  }
> 
> @@ -2616,8 +2577,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
>         struct completion wait;
>         unsigned long flags;
> 
> -       down_read(&hba->clk_scaling_lock);
> -
>         /*
>          * Get free slot, sleep if slots are unavailable.
>          * Even though we use wait_event() which sleeps indefinitely,
> @@ -2652,7 +2611,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
> 
>  out_put_tag:
>         blk_put_request(req);
> -       up_read(&hba->clk_scaling_lock);
>         return err;
>  }
> 
> @@ -5451,7 +5409,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
>                 /* handle fatal errors only when link is functional */
>                 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
>                         /* block commands from scsi mid-layer */
> -                       ufshcd_scsi_block_requests(hba);
> +                       ufshcd_scsi_block_requests(hba, ULONG_MAX);
> 
>                         hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
> 
> @@ -5757,8 +5715,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
>         unsigned long flags;
>         u32 upiu_flags;
> 
> -       down_read(&hba->clk_scaling_lock);
> -
>         req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
>         if (IS_ERR(req))
>                 return PTR_ERR(req);
> @@ -5838,7 +5794,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
>         }
> 
>         blk_put_request(req);
> -       up_read(&hba->clk_scaling_lock);
>         return err;
>  }
> 
> @@ -8307,8 +8262,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
>         /* Initialize mutex for device management commands */
>         mutex_init(&hba->dev_cmd.lock);
> 
> -       init_rwsem(&hba->clk_scaling_lock);
> -
>         ufshcd_init_clk_gating(hba);
> 
>         ufshcd_init_clk_scaling(hba);
> @@ -8394,7 +8347,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
> 
>         /* Hold auto suspend until async scan completes */
>         pm_runtime_get_sync(dev);
> -       atomic_set(&hba->scsi_block_reqs_cnt, 0);
>         /*
>          * We are assuming that device wasn't put in sleep/power-down
>          * state exclusively during the boot stage before kernel.
> diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
> index 6e0b87e8f875..cea71057fa90 100644
> --- a/drivers/scsi/ufs/ufshcd.h
> +++ b/drivers/scsi/ufs/ufshcd.h
> @@ -517,7 +517,6 @@ struct ufs_stats {
>   * @urgent_bkops_lvl: keeps track of urgent bkops level for device
>   * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
>   *  device is known or not.
> - * @scsi_block_reqs_cnt: reference counting for scsi block requests
>   */
>  struct ufs_hba {
>         void __iomem *mmio_base;
> @@ -721,9 +720,7 @@ struct ufs_hba {
>         enum bkops_status urgent_bkops_lvl;
>         bool is_urgent_bkops_lvl_checked;
> 
> -       struct rw_semaphore clk_scaling_lock;
>         struct ufs_desc_size desc_size;
> -       atomic_t scsi_block_reqs_cnt;
> 
>         struct device           bsg_dev;
>         struct request_queue    *bsg_queue;
> --
> 2.24.0.rc1.363.gb1bccd3e3d-goog