> > +static void ufshpb_read_to_handler(struct work_struct *work)
> > +{
> > +	struct delayed_work *dwork = to_delayed_work(work);
> > +	struct ufshpb_lu *hpb;
> > +	struct victim_select_info *lru_info;
> > +	struct ufshpb_region *rgn;
> > +	unsigned long flags;
> > +	LIST_HEAD(expired_list);
> > +
> > +	hpb = container_of(dwork, struct ufshpb_lu, ufshpb_read_to_work);
> > +
> > +	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> > +
> > +	lru_info = &hpb->lru_info;
> > +
> > +	list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
> > +		bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
> > +
> > +		if (timedout) {
> > +			rgn->read_timeout_expiries--;
> > +			if (is_rgn_dirty(rgn) ||
> > +			    rgn->read_timeout_expiries == 0)
> > +				list_add(&rgn->list_expired_rgn, &expired_list);
> > +			else
> > +				rgn->read_timeout = ktime_add_ms(ktime_get(),
> > +								 READ_TO_MS);
> > +		}
> > +	}
> > +
> > +	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> > +
> > +	list_for_each_entry(rgn, &expired_list, list_expired_rgn) {
>
> Here can be problematic - since you don't have the native expired_list
> initialized before use, if the above loop did not insert anything into
> expired_list, it shall become a dead loop here.

Not sure what you meant by native initialization.
LIST_HEAD is statically initializing an empty list, resulting in the
same outcome as INIT_LIST_HEAD - see the small userspace sketch below.

>
> And, which lock is protecting rgn->list_expired_rgn? If two
> read_to_handler works are running in parallel, one can be inserting it
> to its expired_list while another can be deleting it.

The timeout handler, being a delayed work, is meant to run once every
polling period.
Originally I had it protected against two handlers running concurrently,
but I removed that following Daejun's comment, which I accepted, since
the work is always rescheduled with the same polling period - see the
module sketch at the end of this mail.

Thanks,
Avri

> Can Guo.
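On the LIST_HEAD point, here is a minimal userspace sketch. It
re-implements the relevant <linux/list.h> macros locally in the same
shape as the kernel ones (it does not include the kernel headers), so
it compiles and runs on its own; it only demonstrates that LIST_HEAD()
declares an already-empty list, same as calling INIT_LIST_HEAD() on a
plain struct list_head, and that iterating an empty list runs zero
times rather than looping forever.

/*
 * Userspace re-implementation of the relevant <linux/list.h> pieces,
 * kept to the same shape as the kernel macros, purely for illustration.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define LIST_HEAD(name)		struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static inline void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

int main(void)
{
	LIST_HEAD(expired_list);	/* declared *and* initialized empty */
	struct list_head runtime_list;	/* two-step equivalent */
	struct list_head node, *pos;
	int n = 0;

	INIT_LIST_HEAD(&runtime_list);
	printf("both empty: %d\n",
	       list_empty(&expired_list) && list_empty(&runtime_list)); /* 1 */

	/* Nothing was added: the loop body never runs - no endless loop. */
	list_for_each(pos, &expired_list)
		n++;
	printf("iterations while empty: %d\n", n);	/* 0 */

	/* After one insertion the same loop runs exactly once. */
	list_add(&node, &expired_list);
	list_for_each(pos, &expired_list)
		n++;
	printf("iterations after list_add: %d\n", n);	/* 1 */

	return 0;
}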
>
> > +		list_del_init(&rgn->list_expired_rgn);
> > +		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
> > +		ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
> > +		hpb->stats.rb_inactive_cnt++;
> > +		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
> > +	}
> > +
> > +	ufshpb_kick_map_work(hpb);
> > +
> > +	schedule_delayed_work(&hpb->ufshpb_read_to_work,
> > +		msecs_to_jiffies(POLLING_INTERVAL_MS));
> > +}
> > +
> >  static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
> >  				struct ufshpb_region *rgn)
> >  {
> >  	rgn->rgn_state = HPB_RGN_ACTIVE;
> >  	list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
> >  	atomic_inc(&lru_info->active_cnt);
> > +	if (rgn->hpb->is_hcm) {
> > +		rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS);
> > +		rgn->read_timeout_expiries = READ_TO_EXPIRIES;
> > +	}
> >  }
> >
> >  static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
> > @@ -1813,6 +1865,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
> >
> >  		INIT_LIST_HEAD(&rgn->list_inact_rgn);
> >  		INIT_LIST_HEAD(&rgn->list_lru_rgn);
> > +		INIT_LIST_HEAD(&rgn->list_expired_rgn);
> >
> >  		if (rgn_idx == hpb->rgns_per_lu - 1) {
> >  			srgn_cnt = ((hpb->srgns_per_lu - 1) %
> > @@ -1834,6 +1887,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
> >  		}
> >
> >  		rgn->rgn_flags = 0;
> > +		rgn->hpb = hpb;
> >  	}
> >
> >  	return 0;
> > @@ -2053,6 +2107,8 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
> >  			  ufshpb_normalization_work_handler);
> >  		INIT_WORK(&hpb->ufshpb_lun_reset_work,
> >  			  ufshpb_reset_work_handler);
> > +		INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
> > +				  ufshpb_read_to_handler);
> >  	}
> >
> >  	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
> > @@ -2087,6 +2143,10 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
> >  	ufshpb_stat_init(hpb);
> >  	ufshpb_param_init(hpb);
> >
> > +	if (hpb->is_hcm)
> > +		schedule_delayed_work(&hpb->ufshpb_read_to_work,
> > +			msecs_to_jiffies(POLLING_INTERVAL_MS));
> > +
> >  	return 0;
> >
> >  release_pre_req_mempool:
> > @@ -2154,6 +2214,7 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
> >  static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
> >  {
> >  	if (hpb->is_hcm) {
> > +		cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
> >  		cancel_work_sync(&hpb->ufshpb_lun_reset_work);
> >  		cancel_work_sync(&hpb->ufshpb_normalization_work);
> >  	}
> > @@ -2264,6 +2325,10 @@ void ufshpb_resume(struct ufs_hba *hba)
> >  			continue;
> >  		ufshpb_set_state(hpb, HPB_PRESENT);
> >  		ufshpb_kick_map_work(hpb);
> > +		if (hpb->is_hcm)
> > +			schedule_delayed_work(&hpb->ufshpb_read_to_work,
> > +				msecs_to_jiffies(POLLING_INTERVAL_MS));
> > +
> >  	}
> >  }
> >
> > diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
> > index 37c1b0ea0c0a..b49e9a34267f 100644
> > --- a/drivers/scsi/ufs/ufshpb.h
> > +++ b/drivers/scsi/ufs/ufshpb.h
> > @@ -109,6 +109,7 @@ struct ufshpb_subregion {
> >  };
> >
> >  struct ufshpb_region {
> > +	struct ufshpb_lu *hpb;
> >  	struct ufshpb_subregion *srgn_tbl;
> >  	enum HPB_RGN_STATE rgn_state;
> >  	int rgn_idx;
> > @@ -126,6 +127,10 @@ struct ufshpb_region {
> >  	/* region reads - for host mode */
> >  	spinlock_t rgn_lock;
> >  	unsigned int reads;
> > +	/* region "cold" timer - for host mode */
> > +	ktime_t read_timeout;
> > +	unsigned int read_timeout_expiries;
> > +	struct list_head list_expired_rgn;
> >  };
> >
> >  #define for_each_sub_region(rgn, i, srgn) \
> > @@ -219,6 +224,7 @@ struct ufshpb_lu {
> >  	struct victim_select_info lru_info;
> >  	struct work_struct ufshpb_normalization_work;
> >  	struct work_struct ufshpb_lun_reset_work;
> > +	struct delayed_work ufshpb_read_to_work;
> >
> >  	/* pinned region information */
> >  	u32 lu_pinned_start;
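And to back the concurrency point, here is a minimal, buildable module
sketch of the same self re-arming delayed-work pattern the patch uses.
It is not the ufshpb code: poll_work, poll_work_fn and POLL_MS are
made-up stand-ins for ufshpb_read_to_work, ufshpb_read_to_handler and
POLLING_INTERVAL_MS. The workqueue core does not execute one work item
on two CPUs at the same time, and since the handler re-queues itself
only at its end, at most one instance is ever in flight, so the on-stack
expired_list and the rgn->list_expired_rgn linkage are only ever touched
by one handler at a time.

/*
 * Minimal self re-arming delayed work, built as a standalone module.
 * Only a sketch of the pattern in the patch above; the names below do
 * not exist in ufshpb.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_MS 500	/* stand-in for POLLING_INTERVAL_MS */

static struct delayed_work poll_work;

static void poll_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/*
	 * Scan/expire state here.  The workqueue core never runs this
	 * work item on two CPUs at once, and it is re-queued only at
	 * the end, so at most one instance is in flight at any time.
	 */

	schedule_delayed_work(dwork, msecs_to_jiffies(POLL_MS));
}

static int __init poll_example_init(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_work_fn);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_MS));
	return 0;
}

static void __exit poll_example_exit(void)
{
	/* Stops a pending timer and waits out a running instance. */
	cancel_delayed_work_sync(&poll_work);
}

module_init(poll_example_init);
module_exit(poll_example_exit);
MODULE_LICENSE("GPL");

cancel_delayed_work_sync(), as used in ufshpb_cancel_jobs() in the hunk
above, is safe against a work item that re-queues itself: on return the
work is neither pending nor executing.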