> > + /* if region is active but has no reads - inactivate it */ > > + spin_lock(&hpb->rsp_list_lock); > > + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); > > Missing a hpb->stats.rb_inactive_cnt++ here? Thanks. Also noticed that since rb_inactive_cnt and rb_active_cnt are now incremented in more than one place, those updates need to be protected. Thanks, Avri > > Thanks, > Can Guo. > > > + spin_unlock(&hpb->rsp_list_lock); > > + } > > +} > > + > > static void ufshpb_map_work_handler(struct work_struct *work) > > { > > struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, > > map_work); > > @@ -1913,6 +1980,9 @@ static int ufshpb_lu_hpb_init(struct ufs_hba > > *hba, struct ufshpb_lu *hpb) > > INIT_LIST_HEAD(&hpb->list_hpb_lu); > > > > INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); > > + if (hpb->is_hcm) > > + INIT_WORK(&hpb->ufshpb_normalization_work, > > + ufshpb_normalization_work_handler); > > > > hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", > > sizeof(struct ufshpb_req), 0, 0, NULL); > > @@ -2012,6 +2082,8 @@ static void ufshpb_discard_rsp_lists(struct > > ufshpb_lu *hpb) > > > > static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) > > { > > + if (hpb->is_hcm) > > + cancel_work_sync(&hpb->ufshpb_normalization_work); > > cancel_work_sync(&hpb->map_work); > > } > > > > diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h > > index 8119b1a3d1e5..bd4308010466 100644 > > --- a/drivers/scsi/ufs/ufshpb.h > > +++ b/drivers/scsi/ufs/ufshpb.h > > @@ -121,6 +121,10 @@ struct ufshpb_region { > > struct list_head list_lru_rgn; > > unsigned long rgn_flags; > > #define RGN_FLAG_DIRTY 0 > > + > > + /* region reads - for host mode */ > > + spinlock_t rgn_lock; > > + unsigned int reads; > > }; > > > > #define for_each_sub_region(rgn, i, srgn) \ > > @@ -211,6 +215,7 @@ struct ufshpb_lu { > > > > /* for selecting victim */ > > struct victim_select_info lru_info; > > + struct work_struct ufshpb_normalization_work; > > > > /* pinned region information */ 
> u32 lu_pinned_start;