From: Peter Wang <peter.wang@xxxxxxxxxxxx>
The current code flow triggers a lockdep warning like the one below.
kworker/u16:0:  Possible unsafe locking scenario:
kworker/u16:0:        CPU0                    CPU1
kworker/u16:0:        ----                    ----
kworker/u16:0:   lock(&hba->clk_scaling_lock);
kworker/u16:0:                                lock(&hba->dev_cmd.lock);
kworker/u16:0:                                lock(&hba->clk_scaling_lock);
kworker/u16:0:   lock(&hba->dev_cmd.lock);
kworker/u16:0:
Before this patch, clk_scaling_lock was held in reader mode during the ufshcd_wb_toggle() call.
With this patch applied, clk_scaling_lock is no longer held while ufshcd_wb_toggle() is called.
This is safe because ufshcd_wb_toggle() acquires clk_scaling_lock in reader mode again further
down its call chain:
ufshcd_wb_toggle -> __ufshcd_wb_toggle -> ufshcd_query_flag_retry -> ufshcd_query_flag ->
ufshcd_exec_dev_cmd -> down_read(&hba->clk_scaling_lock);
That protection is sufficient to ensure the clocks are not changed while the command is being sent.
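To make the inversion concrete, the two acquisition orders lockdep complains about look roughly
like this (a simplified sketch of the flows above, callers noted in comments, not verbatim code):

  /* devfreq worker, before this patch */
  down_write(&hba->clk_scaling_lock);       /* ufshcd_clock_scaling_prepare() */
  downgrade_write(&hba->clk_scaling_lock);  /* held in reader mode across the WB toggle */
  ufshcd_wb_toggle(hba, scale_up);
      mutex_lock(&hba->dev_cmd.lock);       /* ufshcd_query_flag() */

  /* any other device command issuer */
  mutex_lock(&hba->dev_cmd.lock);           /* ufshcd_query_flag() */
  ufshcd_exec_dev_cmd(hba, ...);
      down_read(&hba->clk_scaling_lock);

One context takes clk_scaling_lock before dev_cmd.lock, the other takes them in the opposite
order, which is the circular dependency reported above.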
ufshcd_wb_toggle() is additionally protected by hba->clk_scaling.is_allowed, which makes sure
ufshcd_devfreq_scale() does not run concurrently.
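For reference, the resulting order in ufshcd_devfreq_scale() is then roughly the following
(condensed from the diff below; error handling and the scaling steps themselves are omitted):

  down_write(&hba->clk_scaling_lock);       /* ufshcd_clock_scaling_prepare() */
  /* ... scale gear and clocks ... */
  hba->clk_scaling.is_allowed = false;      /* keep devfreq from re-entering */
  up_write(&hba->clk_scaling_lock);         /* ufshcd_clock_scaling_unprepare() */

  ufshcd_wb_toggle(hba, scale_up);          /* takes dev_cmd.lock, then
                                               clk_scaling_lock in reader mode */
  ufshcd_clk_scaling_allow(hba, true);      /* re-enable clock scaling */

so ufshcd_wb_toggle() never runs with clk_scaling_lock already held.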
Fixes: 0e9d4ca43ba8 ("scsi: ufs: Protect some contexts from unexpected clock scaling")
Signed-off-by: Peter Wang <peter.wang@xxxxxxxxxxxx>
---
drivers/ufs/core/ufshcd.c | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index c7b337480e3e..aa57126fdb49 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -272,6 +272,7 @@ static void ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
@@ -1249,12 +1250,10 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
- if (writelock)
- up_write(&hba->clk_scaling_lock);
- else
- up_read(&hba->clk_scaling_lock);
+ up_write(&hba->clk_scaling_lock);
+
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
@@ -1271,7 +1270,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- bool is_writelock = true;
+ bool wb_toggle = false;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
@@ -1300,13 +1299,19 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
}
- /* Enable Write Booster if we have scaled up else disable it */
- downgrade_write(&hba->clk_scaling_lock);
- is_writelock = false;
- ufshcd_wb_toggle(hba, scale_up);
+ /* Disable clk_scaling until ufshcd_wb_toggle() finishes */
+ hba->clk_scaling.is_allowed = false;
+ wb_toggle = true;
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba, is_writelock);
+ ufshcd_clock_scaling_unprepare(hba);
+
+ /* Enable Write Booster if we have scaled up else disable it */
+ if (wb_toggle) {
+ ufshcd_wb_toggle(hba, scale_up);
+ ufshcd_clk_scaling_allow(hba, true);
+ }
+
return ret;
}
--
2.18.0