RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

> 
> A pinned region is a pre-set regions on the UFS device that is always
> activate-state and
This sentence got cut off.

> 
> The data structure for map data request and L2P map uses mempool API,
> minimizing allocation overhead while avoiding static allocation.

Maybe one or two more sentences to explain the L2P framework:
Each hpb lun maintains 2 "to-do" lists: 
 - hpb->lh_inact_rgn - regions to be inactivated, and 
 - hpb->lh_act_srgn - subregions to be activated
Those lists are checked on every resume and on every completion interrupt.
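Something like this, just to illustrate the consumer side (a rough, untested
sketch; the function name is made up, the list/lock/member names are the ones
from the patch):

static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
{
	struct ufshpb_subregion *srgn;
	unsigned long flags;

	spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
						struct ufshpb_subregion,
						list_act_srgn))) {
		list_del_init(&srgn->list_act_srgn);
		spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);

		/* issue the READ BUFFER (map request) for this subregion */

		spin_lock_irqsave(&hpb->rsp_list_lock, flags);
	}
	spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
}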

> 
> Signed-off-by: Daejun Park <daejun7.park@xxxxxxxxxxx>
> ---
> +       for (i = 0; i < hpb->pages_per_srgn; i++) {
> +               mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
> +                                               GFP_KERNEL);
> +               memset(page_address(mctx->m_page[i]), 0, PAGE_SIZE);
Better move this memset after the if (!mctx->m_page[i]) check.
And maybe use clear_page() instead? (See the sketch after the quoted hunk.)

> +               if (!mctx->m_page[i]) {
> +                       for (j = 0; j < i; j++)
> +                               mempool_free(mctx->m_page[j],
> +                                            ufshpb_drv.ufshpb_page_pool);
> +                       goto release_ppn_dirty;
> +               }
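
i.e. something along these lines (untested sketch):

	for (i = 0; i < hpb->pages_per_srgn; i++) {
		mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
						GFP_KERNEL);
		if (!mctx->m_page[i]) {
			for (j = 0; j < i; j++)
				mempool_free(mctx->m_page[j],
					     ufshpb_drv.ufshpb_page_pool);
			goto release_ppn_dirty;
		}
		/* only touch the page once the allocation is known to succeed */
		clear_page(page_address(mctx->m_page[i]));
	}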


> +static inline int ufshpb_add_region(struct ufshpb_lu *hpb,
> +                                   struct ufshpb_region *rgn)
> +{
Maybe pick a name that better describes what this function does - ufshpb_get_rgn_map_ctx?

> +
> +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region
> *rgn)
> +{
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&hpb->hpb_state_lock, flags);
> +       if (rgn->rgn_state == HPB_RGN_PINNED) {
> +               dev_warn(&hpb->hpb_lu_dev,
> +                        "pinned region cannot drop-out. region %d\n",
> +                        rgn->rgn_idx);
> +               goto out;
> +       }
> +
> +       if (!list_empty(&rgn->list_lru_rgn)) {
> +               if (ufshpb_check_issue_state_srgns(hpb, rgn)) {
So if one of its subregions has an inflight map request, you add it to the "starved" list?
Why call it "starved"?


> +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
> +                               struct ufshpb_region *rgn,
> +                               struct ufshpb_subregion *srgn)
> +{
> +       struct ufshpb_req *map_req;
> +       unsigned long flags;
> +       int ret = 0;
> +
> +       spin_lock_irqsave(&hpb->hpb_state_lock, flags);
> +       /*
> +        * Since the region state change occurs only in the hpb task-work,
> +        * the state of the region cannot HPB_RGN_INACTIVE at this point.
> +        * The region state must be changed in the hpb task-work
I think that you called this worker map_work?


> +               spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
> +               ret = ufshpb_add_region(hpb, rgn);
If this is not an active region: although the device indicated to activate a
specific subregion, you are activating all the subregions of that region.
You should elaborate on that in your commit log,
and explain why this is the correct activation course.

> +       /*
> +        * If the active region and the inactive region are the same,
> +        * we will inactivate this region.
> +        * The device could check this (region inactivated) and
> +        * will response the proper active region information
> +        */
> +       spin_lock(&hpb->rsp_list_lock);
> +       for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
> +               rgn_idx =
> +                       be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
> +               srgn_idx =
> +                       be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
get_unaligned_be16() instead of be16_to_cpu()?
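i.e. (sketch, assuming the response fields stay packed as they are in the patch):

		rgn_idx = get_unaligned_be16(
			&rsp_field->hpb_active_field[i].active_rgn);
		srgn_idx = get_unaligned_be16(
			&rsp_field->hpb_active_field[i].active_srgn);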

> +
> +               dev_dbg(&hpb->hpb_lu_dev, "activate(%d) region %d - %d\n",
> +                       i, rgn_idx, srgn_idx);
> +               ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> +               atomic_inc(&hpb->stats.rb_active_cnt);
> +       }
> +
> +       for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
> +               rgn_idx = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
> +               dev_dbg(&hpb->hpb_lu_dev, "inactivate(%d) region %d\n",
> +                       i, rgn_idx);
> +               ufshpb_update_inactive_info(hpb, rgn_idx);
> +               atomic_inc(&hpb->stats.rb_inactive_cnt);
> +       }
> +       spin_unlock(&hpb->rsp_list_lock);
> +
> +       dev_dbg(&hpb->hpb_lu_dev, "Noti: #ACT %u #INACT %u\n",
> +               rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
> +
> +       queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);
> +}
> +
> +/* routine : isr (ufs) */
> +static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> +{
> +       struct ufshpb_lu *hpb;
> +       struct ufshpb_rsp_field *rsp_field;
> +       int data_seg_len, ret;
> +
> +       data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
> +               & MASK_RSP_UPIU_DATA_SEG_LEN;
get_unaligned_be32() instead of be32_to_cpu()?

> +
> +       if (!data_seg_len) {
data_seg_len should be checked against DEV_DATA_SEG_LEN, and you should also check
HPB_UPDATE_ALERT; you might want to do both here rather than in ufshpb_may_field_valid.
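e.g. something like this (untested sketch; MASK_HPB_UPDATE_ALERT is just a
placeholder for the spec-defined bit, and this ignores the !data_seg_len
re-queue path for brevity):

	if (data_seg_len != DEV_DATA_SEG_LEN ||
	    !(be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
	      MASK_HPB_UPDATE_ALERT))
		return;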

> +               if (!ufshpb_is_general_lun(lrbp->lun))
> +                       return;
> +
> +               hpb = ufshpb_get_hpb_data(lrbp->cmd);
> +               ret = ufshpb_lu_get(hpb);
> +               if (ret)
> +                       return;
> +
> +               if (!ufshpb_is_empty_rsp_lists(hpb))
> +                       queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);
> +
> +               goto put_hpb;
> +       }
> +
> +       rsp_field = ufshpb_get_hpb_rsp(lrbp);
> +       if (ufshpb_may_field_valid(hba, lrbp, rsp_field))
> +               return;
> +
> +       hpb = ufshpb_get_hpb_data(lrbp->cmd);
> +       ret = ufshpb_lu_get(hpb);
> +       if (ret)
> +               return;
> +
> +       atomic_inc(&hpb->stats.rb_noti_cnt);
> +
> +       switch (rsp_field->hpb_type) {
> +       case HPB_RSP_REQ_REGION_UPDATE:
> +               WARN_ON(data_seg_len != DEV_DATA_SEG_LEN);
> +               ufshpb_rsp_req_region_update(hpb, rsp_field);
> +               break;
What about hpb dev reset - oper 0x2?
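e.g. (sketch; HPB_RSP_DEV_RESET is just a placeholder name for oper 0x2):

	case HPB_RSP_DEV_RESET:
		/* the device dropped its HPB state - regions need re-activation */
		dev_warn(&hpb->hpb_lu_dev, "UFS device reset HPB information\n");
		break;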


> +       default:
> +               dev_notice(&hpb->hpb_lu_dev, "hpb_type is not available: %d\n",
> +                          rsp_field->hpb_type);
> +               break;
> +       }
> +put_hpb:
> +       ufshpb_lu_put(hpb);
> +}
> +


> +static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
> +                                  struct ufshpb_region *rgn,
> +                                  struct ufshpb_subregion *srgn)
> +{
> +       if (!list_empty(&rgn->list_inact_rgn))
> +               return;
> +
> +       if (!list_empty(&srgn->list_act_srgn)) {
> +               list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
Why is this needed?
Why update this subregion's position in the list?

> +               return;
> +       }
> +
> +       list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
> +}


> @@ -195,8 +1047,15 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba
> *hba, struct ufshpb_lu *hpb)
>  release_srgn_table:
>         for (i = 0; i < rgn_idx; i++) {
>                 rgn = rgn_table + i;
> -               if (rgn->srgn_tbl)
> +               if (rgn->srgn_tbl) {
> +                       for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt;
> +                            srgn_idx++) {
> +                               srgn = rgn->srgn_tbl + srgn_idx;
> +                               if (srgn->mctx)
How is it even possible that on init there is an active subregion?
ufshpb_init_pinned_active_region does its own cleanup.

> +       hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
> +                         sizeof(struct page *) * hpb->pages_per_srgn,
> +                         0, 0, NULL);
What is the advantage of using an array of page pointers
instead of a single pointer to pages_per_srgn pages?
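e.g. (sketch; m_pages is a hypothetical single-pointer replacement for the
m_page array, assuming the map buffer may be physically contiguous):

	mctx->m_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				    get_order(hpb->pages_per_srgn * PAGE_SIZE));
	if (!mctx->m_pages)
		goto release_ppn_dirty;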

 

> @@ -398,6 +1326,9 @@ static void ufshpb_resume(struct ufs_hba *hba)
> 
>                 dev_info(&hpb->hpb_lu_dev, "ufshpb resume");
>                 ufshpb_set_state(hpb, HPB_PRESENT);
> +               if (!ufshpb_is_empty_rsp_lists(hpb))
> +                       queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);
Ahha - so you are using the ufs driver pm flows to poll your work queue.
Why aren't the device recommendations enough?

> +
>                 ufshpb_lu_put(hpb);
>         }
>  }

Thanks,
Avri



