In contexts like suspend, shutdown and error handling, we need to
suspend devfreq to make sure these contexts are not disturbed by
clock scaling. However, suspending devfreq alone is not enough,
since users can still trigger clock scaling by manipulating the
sysfs node clkscale_enable and the devfreq sysfs nodes min/max_freq
and governor. Add one more flag to struct clk_scaling so that these
contexts can prevent clock scaling from being invoked through the
above sysfs nodes.
Signed-off-by: Can Guo <cang@xxxxxxxxxxxxxx>
---
drivers/scsi/ufs/ufshcd.c | 83 +++++++++++++++++++++++++++++++----------------
drivers/scsi/ufs/ufshcd.h | 2 ++
2 files changed, 57 insertions(+), 28 deletions(-)
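
For illustration only, below is a minimal userspace sketch of the
two-flag scheme this patch introduces: is_enabled mirrors the user's
clkscale_enable preference, while is_allowed is the gate that the
suspend/shutdown/error-handling contexts flip under the write lock so
a concurrent sysfs-triggered scaling attempt fails fast. A pthread
rwlock stands in for the kernel's clk_scaling_lock rw_semaphore, and
the harness (main and the helper names) is hypothetical, not part of
this patch.

/*
 * Userspace sketch of the is_enabled/is_allowed split. Hypothetical
 * demo code; mirrors the patch's locking pattern, not the kernel API.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct clk_scaling {
	pthread_rwlock_t lock;	/* stands in for hba->clk_scaling_lock */
	bool is_enabled;	/* user preference (clkscale_enable) */
	bool is_allowed;	/* context gate, cleared by suspend/error paths */
};

/* Mirrors ufshcd_clock_scaling_prepare(): bail out if the gate is closed. */
static int scaling_prepare(struct clk_scaling *cs)
{
	pthread_rwlock_wrlock(&cs->lock);
	if (!cs->is_allowed) {
		pthread_rwlock_unlock(&cs->lock);
		return -EAGAIN;
	}
	return 0;	/* on success the caller holds the write lock */
}

/* Mirrors ufshcd_clock_scaling_unprepare(). */
static void scaling_unprepare(struct clk_scaling *cs)
{
	pthread_rwlock_unlock(&cs->lock);
}

/* Mirrors ufshcd_err_handling_prepare(): forbid scaling in this context. */
static void err_handling_prepare(struct clk_scaling *cs)
{
	pthread_rwlock_wrlock(&cs->lock);
	cs->is_allowed = false;
	pthread_rwlock_unlock(&cs->lock);
}

/* Mirrors ufshcd_err_handling_unprepare(): re-open the gate. */
static void err_handling_unprepare(struct clk_scaling *cs)
{
	pthread_rwlock_wrlock(&cs->lock);
	cs->is_allowed = true;
	pthread_rwlock_unlock(&cs->lock);
}

int main(void)
{
	struct clk_scaling cs = { .is_enabled = true, .is_allowed = true };

	pthread_rwlock_init(&cs.lock, NULL);

	err_handling_prepare(&cs);
	/* any scaling attempt now fails fast instead of racing */
	printf("prepare while gated: %d (expect %d)\n",
	       scaling_prepare(&cs), -EAGAIN);
	err_handling_unprepare(&cs);

	/* normal path: honor the user preference, then take the lock */
	if (cs.is_enabled && scaling_prepare(&cs) == 0) {
		printf("scaling permitted, doing work under write lock\n");
		scaling_unprepare(&cs);
	}
	pthread_rwlock_destroy(&cs.lock);
	return 0;
}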
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c148fc..4ccdd2b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1147,12 +1147,22 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
*/
ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+ if (!hba->clk_scaling.is_allowed)
+ ret = -EAGAIN;
+ else if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US))
ret = -EBUSY;
+
+ if (ret) {
up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ goto out;
}
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+out:
return ret;
}
@@ -1160,6 +1170,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
}
/**
@@ -1175,12 +1186,9 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
-
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- goto out;
+ return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
@@ -1212,8 +1220,6 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
-out:
- ufshcd_release(hba);
return ret;
}
@@ -1487,7 +1493,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1496,12 +1502,20 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
int err;
+ unsigned long flags;
+ bool update = true;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
value = !!value;
- if (value == hba->clk_scaling.is_allowed)
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (value == hba->clk_scaling.is_enabled)
+ update = false;
+ else
+ hba->clk_scaling.is_enabled = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (!update)
goto out;
pm_runtime_get_sync(hba->dev);
@@ -1510,8 +1524,6 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
- hba->clk_scaling.is_allowed = value;
-
if (value) {
ufshcd_resume_clkscaling(hba);
} else {
@@ -1845,8 +1857,6 @@ static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
hba->host->host_no);
hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- ufshcd_clkscaling_init_sysfs(hba);
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
@@ -1854,6 +1864,8 @@ static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
if (!ufshcd_is_clkscaling_supported(hba))
return;
+ if (hba->devfreq)
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
destroy_workqueue(hba->clk_scaling.workq);
ufshcd_devfreq_remove(hba);
}
@@ -1918,7 +1930,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
if (queue_resume_work)
@@ -4987,7 +4999,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
complete(hba->dev_cmd.complete);
}
}
- if (ufshcd_is_clkscaling_supported(hba))
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ hba->clk_scaling.active_reqs > 0)
hba->clk_scaling.active_reqs--;
}
@@ -5650,18 +5663,25 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
} else {
ufshcd_hold(hba, false);
- if (hba->clk_scaling.is_allowed) {
+ if (hba->clk_scaling.is_enabled) {
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
ufshcd_suspend_clkscaling(hba);
}
}
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = false;
+ up_write(&hba->clk_scaling_lock);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_release(hba);
- if (hba->clk_scaling.is_allowed)
+
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = true;
+ up_write(&hba->clk_scaling_lock);
+ if (hba->clk_scaling.is_enabled)
ufshcd_resume_clkscaling(hba);
pm_runtime_put(hba->dev);
}
@@ -7620,12 +7640,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
if (!hba->devfreq) {
+ hba->clk_scaling.is_allowed = true;