On 2021-03-22 14:55, Daejun Park wrote:
> This patch adds support for HPB 2.0.
>
> HPB 2.0 supports reads of varying sizes, from 4KB to 512KB.
> Reads of up to 32KB are served by a single HPB READ command.
> Reads of 36KB to 512KB are served by a combination of a WRITE BUFFER
> command and an HPB READ command, so that more PPNs can be delivered.
> The WRITE BUFFER command may not be issued immediately because of busy
> tags. To use HPB reads more aggressively, the driver can requeue the
> WRITE BUFFER command. The requeue threshold is implemented as a timeout
> and can be modified through the requeue_timeout_ms entry in sysfs.
>
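Restating the dispatch described above in code form: a rough,
standalone sketch, where the enum/function names are mine and the
32KB/512KB thresholds are the defaults from the commit message (the
driver itself takes the single-command limit from the device attribute
exposed as max_data_size_hpb_single_cmd and the upper bound from
pre_req_max_tr_len).

#include <stdio.h>

/*
 * Classify a read by its size in 4KB blocks (8 blocks = 32KB,
 * 128 blocks = 512KB). Illustrative only, not the driver code.
 */
enum hpb_path {
	NORMAL_READ,			/* no HPB assistance */
	HPB_READ_SINGLE,		/* a single HPB READ carries the PPN */
	HPB_READ_WITH_WRITE_BUFFER,	/* WRITE BUFFER delivers PPNs, then HPB READ */
};

static enum hpb_path classify_read(unsigned int blocks_4k)
{
	if (blocks_4k == 0 || blocks_4k > 128)	/* zero-length or > 512KB */
		return NORMAL_READ;
	if (blocks_4k <= 8)			/* <= 32KB */
		return HPB_READ_SINGLE;
	return HPB_READ_WITH_WRITE_BUFFER;	/* 36KB .. 512KB */
}

int main(void)
{
	printf("4KB   -> %d\n", classify_read(1));   /* HPB_READ_SINGLE */
	printf("64KB  -> %d\n", classify_read(16));  /* HPB_READ_WITH_WRITE_BUFFER */
	printf("768KB -> %d\n", classify_read(192)); /* NORMAL_READ */
	return 0;
}

When the WRITE BUFFER command cannot be issued because of busy tags,
the command is requeued until requeue_timeout_ms expires; after that
the driver gives up on HPB for this command and bumps the miss counter.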
> Signed-off-by: Daejun Park <daejun7.park@xxxxxxxxxxx>
> ---
> Documentation/ABI/testing/sysfs-driver-ufs | 47 +-
> drivers/scsi/ufs/ufs-sysfs.c | 4 +
> drivers/scsi/ufs/ufs.h | 3 +-
> drivers/scsi/ufs/ufshcd.c | 25 +-
> drivers/scsi/ufs/ufshcd.h | 7 +
> drivers/scsi/ufs/ufshpb.c | 626 +++++++++++++++++++--
> drivers/scsi/ufs/ufshpb.h | 67 ++-
> 7 files changed, 698 insertions(+), 81 deletions(-)
>
> diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
> index 528bf89fc98b..419adf450b89 100644
> --- a/Documentation/ABI/testing/sysfs-driver-ufs
> +++ b/Documentation/ABI/testing/sysfs-driver-ufs
> @@ -1253,14 +1253,14 @@ Description: This entry shows the number of HPB pinned regions assigned to
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/hit_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/hit_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of reads that changed to HPB read.
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/miss_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/miss_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of reads that cannot be changed to
> @@ -1268,7 +1268,7 @@ Description: This entry shows the number of reads that cannot be changed to
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/rb_noti_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/rb_noti_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of response UPIUs that has
> @@ -1276,7 +1276,7 @@ Description: This entry shows the number of response UPIUs that has
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/rb_active_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/rb_active_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of active sub-regions recommended by
> @@ -1284,7 +1284,7 @@ Description: This entry shows the number of active sub-regions recommended by
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/rb_inactive_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/rb_inactive_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of inactive regions recommended by
> @@ -1292,10 +1292,45 @@ Description: This entry shows the number of inactive regions recommended by
>
> The file is read only.
>
> -What: /sys/class/scsi_device/*/device/hpb_sysfs/map_req_cnt
> +What: /sys/class/scsi_device/*/device/hpb_stat_sysfs/map_req_cnt
> Date: March 2021
> Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> Description: This entry shows the number of read buffer commands for
> activating sub-regions recommended by response UPIUs.
>
> The file is read only.
> +
> +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/requeue_timeout_ms
> +Date: March 2021
> +Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> +Description: This entry shows the requeue timeout threshold for write buffer
> + command in ms. This value can be changed by writing proper integer to
> + this entry.
> +
> +What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd
> +Date: March 2021
> +Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> +Description: This entry shows the maximum HPB data size for using single HPB
> + command.
> +
> + === ========
> + 00h 4KB
> + 01h 8KB
> + 02h 12KB
> + ...
> + FFh 1024KB
> + === ========
> +
> + The file is read only.
> +
> +What: /sys/bus/platform/drivers/ufshcd/*/flags/wb_enable
> +Date: March 2021
> +Contact: Daejun Park <daejun7.park@xxxxxxxxxxx>
> +Description: This entry shows the status of HPB.
> +
> + == ============================
> + 0 HPB is not enabled.
> + 1 HPB is enabled
> + == ============================
> +
> + The file is read only.
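Usage note on the new entries: requeue_timeout_ms is the only writable
one added here. A minimal userspace sketch (my example, not part of the
patch; the "0:0:0:0" device address is a placeholder for the target LU)
that sets it to 50 ms:

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/class/scsi_device/0:0:0:0/device/hpb_param_sysfs/requeue_timeout_ms";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen requeue_timeout_ms");
		return 1;
	}
	fprintf(f, "%d\n", 50);	/* threshold in milliseconds */
	return fclose(f) ? 1 : 0;
}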
> diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
> index 2546e7a1ac4f..92a883866e12 100644
> --- a/drivers/scsi/ufs/ufs-sysfs.c
> +++ b/drivers/scsi/ufs/ufs-sysfs.c
> @@ -782,6 +782,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
> UFS_FLAG(wb_enable, _WB_EN);
> UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
> UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
> +UFS_FLAG(hpb_enable, _HPB_EN);
>
> static struct attribute *ufs_sysfs_device_flags[] = {
> &dev_attr_device_init.attr,
> @@ -795,6 +796,7 @@ static struct attribute *ufs_sysfs_device_flags[] = {
> &dev_attr_wb_enable.attr,
> &dev_attr_wb_flush_en.attr,
> &dev_attr_wb_flush_during_h8.attr,
> + &dev_attr_hpb_enable.attr,
> NULL,
> };
>
> @@ -841,6 +843,7 @@ out: \
> static DEVICE_ATTR_RO(_name)
>
> UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
> +UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
> UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
> UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
> UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
> @@ -864,6 +867,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
>
> static struct attribute *ufs_sysfs_attributes[] = {
> &dev_attr_boot_lun_enabled.attr,
> + &dev_attr_max_data_size_hpb_single_cmd.attr,
> &dev_attr_current_power_mode.attr,
> &dev_attr_active_icc_level.attr,
> &dev_attr_ooo_data_enabled.attr,
> diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
> index bfb84d2ba990..8c6b38b1b142 100644
> --- a/drivers/scsi/ufs/ufs.h
> +++ b/drivers/scsi/ufs/ufs.h
> @@ -123,12 +123,13 @@ enum flag_idn {
> QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F,
> QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10,
> QUERY_FLAG_IDN_HPB_RESET = 0x11,
> + QUERY_FLAG_IDN_HPB_EN = 0x12,
> };
>
> /* Attribute idn for Query requests */
> enum attr_idn {
> QUERY_ATTR_IDN_BOOT_LU_EN = 0x00,
> - QUERY_ATTR_IDN_RESERVED = 0x01,
> + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01,
> QUERY_ATTR_IDN_POWER_MODE = 0x02,
> QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
> QUERY_ATTR_IDN_OOO_DATA_EN = 0x04,
> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
> index a7cf9278965c..1653c7a7b066 100644
> --- a/drivers/scsi/ufs/ufshcd.c
> +++ b/drivers/scsi/ufs/ufshcd.c
> @@ -2653,7 +2653,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
>
> lrbp->req_abort_skip = false;
>
> - ufshpb_prep(hba, lrbp);
> + err = ufshpb_prep(hba, lrbp);
> + if (err == -EAGAIN) {
> + lrbp->cmd = NULL;
> + ufshcd_release(hba);
> + goto out;
> + }
>
> ufshcd_comp_scsi_upiu(hba, lrbp);
>
> @@ -3107,7 +3112,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
> *
> * Returns 0 for success, non-zero in case of failure
> */
> -static int ufshcd_query_attr_retry(struct ufs_hba *hba,
> +int ufshcd_query_attr_retry(struct ufs_hba *hba,
> enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
> u32 *attr_val)
> {
> @@ -4862,7 +4867,8 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
> static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
> {
> /* skip well-known LU */
> - if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || !ufshpb_is_allowed(hba))
> + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
> + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
> return;
>
> ufshpb_destroy_lu(hba, sdev);
> @@ -7454,8 +7460,18 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
>
> if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
> (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
> - dev_info->hpb_enabled = true;
> + bool hpb_en = false;
> +
> ufshpb_get_dev_info(hba, desc_buf);
> +
> + if (!ufshpb_is_legacy(hba))
> + err = ufshcd_query_flag_retry(hba,
> + UPIU_QUERY_OPCODE_READ_FLAG,
> + QUERY_FLAG_IDN_HPB_EN, 0,
> + &hpb_en);
> +
> + if (ufshpb_is_legacy(hba) || (!err && hpb_en))
> + dev_info->hpb_enabled = true;
> }
>
> err = ufshcd_read_string_desc(hba, model_index,
> @@ -8028,6 +8044,7 @@ static const struct attribute_group *ufshcd_driver_groups[] = {
> &ufs_sysfs_lun_attributes_group,
> #ifdef CONFIG_SCSI_UFS_HPB
> &ufs_sysfs_hpb_stat_group,
> + &ufs_sysfs_hpb_param_group,
> #endif
> NULL,
> };
> diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
> index 008a5f7146c0..8aca8f327981 100644
> --- a/drivers/scsi/ufs/ufshcd.h
> +++ b/drivers/scsi/ufs/ufshcd.h
> @@ -654,6 +654,8 @@ struct ufs_hba_variant_params {
> * @srgn_size: device reported HPB sub-region size
> * @slave_conf_cnt: counter to check all lu finished initialization
> * @hpb_disabled: flag to check if HPB is disabled
> + * @max_hpb_single_cmd: maximum size of single HPB command
> + * @is_legacy: flag to check HPB 1.0
> */
> struct ufshpb_dev_info {
> int num_lu;
> @@ -661,6 +663,8 @@ struct ufshpb_dev_info {
> int srgn_size;
> atomic_t slave_conf_cnt;
> bool hpb_disabled;
> + int max_hpb_single_cmd;
> + bool is_legacy;
> };
> #endif
>
> @@ -1096,6 +1100,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
> u8 param_offset,
> u8 *param_read_buf,
> u8 param_size);
> +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode,
> + enum attr_idn idn, u8 index, u8 selector,
> + u32 *attr_val);
> int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
> enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
> int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
> diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
> index f789339f68d9..3ac8b0a9e8d3 100644
> --- a/drivers/scsi/ufs/ufshpb.c
> +++ b/drivers/scsi/ufs/ufshpb.c
> @@ -31,6 +31,11 @@ bool ufshpb_is_allowed(struct ufs_hba *hba)
> return !(hba->ufshpb_dev.hpb_disabled);
> }
>
> +bool ufshpb_is_legacy(struct ufs_hba *hba)
> +{
> + return hba->ufshpb_dev.is_legacy;
> +}
> +
> static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
> {
> return sdev->hostdata;
> @@ -64,9 +69,19 @@ static bool ufshpb_is_write_or_discard_cmd(struct scsi_cmnd *cmd)
> op_is_discard(req_op(cmd->request));
> }
>
> -static bool ufshpb_is_support_chunk(int transfer_len)
> +static bool ufshpb_is_support_chunk(struct ufshpb_lu *hpb, int transfer_len)
> {
> - return transfer_len <= HPB_MULTI_CHUNK_HIGH;
> + return transfer_len <= hpb->pre_req_max_tr_len;
> +}
> +
> +/*
> + * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 512KB (len=128) as
> + * default. It is possible to change range of transfer_len through sysfs.
> + */
> +static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
> +{
> + return (len > hpb->pre_req_min_tr_len &&
> + len <= hpb->pre_req_max_tr_len);
> }
>
> static bool ufshpb_is_general_lun(int lun)
> @@ -74,8 +89,7 @@ static bool ufshpb_is_general_lun(int lun)
> return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
> }
>
> -static bool
> -ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
> +static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
> {
> if (hpb->lu_pinned_end != PINNED_NOT_SET &&
> rgn_idx >= hpb->lu_pinned_start &&
> @@ -264,7 +278,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
>
> static void
> ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp,
> - u32 lpn, u64 ppn, unsigned int transfer_len)
> + u32 lpn, u64 ppn, unsigned int transfer_len,
> + int read_id)
> {
> unsigned char *cdb = lrbp->cmd->cmnd;
>
> @@ -273,15 +288,261 @@ ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp,
> /* ppn value is stored as big-endian in the host memory */
> memcpy(&cdb[6], &ppn, sizeof(u64));
> cdb[14] = transfer_len;
> + cdb[15] = read_id;
>
> lrbp->cmd->cmd_len = UFS_CDB_SIZE;
> }
>
> +static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
> + unsigned long lpn, unsigned int len,
> + int read_id)
> +{
> + cdb[0] = UFSHPB_WRITE_BUFFER;
> + cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
> +
> + put_unaligned_be32(lpn, &cdb[2]);
> + cdb[6] = read_id;
> + put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
> +
> + cdb[9] = 0x00; /* Control = 0x00 */
> +}
> +
> +static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
> +{
> + struct ufshpb_req *pre_req;
> +
> + if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
> + dev_info(&hpb->sdev_ufs_lu->sdev_dev,
> + "pre_req throttle. inflight %d throttle %d",
> + hpb->num_inflight_pre_req, hpb->throttle_pre_req);
> + return NULL;
> + }
> +
> + pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
> + struct ufshpb_req, list_req);
> + if (!pre_req) {
> + dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
> + return NULL;
> + }
> +
> + list_del_init(&pre_req->list_req);
> + hpb->num_inflight_pre_req++;
> +
> + return pre_req;
> +}
> +
> +static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
> + struct ufshpb_req *pre_req)
> +{
> + pre_req->req = NULL;
> + bio_reset(pre_req->bio);
> + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
> + hpb->num_inflight_pre_req--;
> +}
> +
> +static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
> +{
> + struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
> + struct ufshpb_lu *hpb = pre_req->hpb;
> + unsigned long flags;
> +
> + if (error) {
> + struct scsi_request *rq = scsi_req(req);
> + struct scsi_sense_hdr sshdr;
> +
> + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
> + scsi_normalize_sense(rq->sense, SCSI_SENSE_BUFFERSIZE,
> + &sshdr);
> + dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> + "code %x sense_key %x asc %x ascq %x",
> + sshdr.response_code,
> + sshdr.sense_key, sshdr.asc, sshdr.ascq);
> + dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> + "byte4 %x byte5 %x byte6 %x additional_len %x",
> + sshdr.byte4, sshdr.byte5,
> + sshdr.byte6, sshdr.additional_length);
> + }
> +
> + blk_mq_free_request(req);
> + spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> + ufshpb_put_pre_req(pre_req->hpb, pre_req);
> + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> +}
> +
> +static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
> +{
> + struct ufshpb_lu *hpb = pre_req->hpb;
> + struct ufshpb_region *rgn;
> + struct ufshpb_subregion *srgn;
> + u64 *addr;
> + int offset = 0;
> + int copied;
> + unsigned long lpn = pre_req->wb.lpn;
> + int rgn_idx, srgn_idx, srgn_offset;
> + unsigned long flags;
> +
> + addr = page_address(page);
> + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
> +
> + spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> +
> +next_offset:
> + rgn = hpb->rgn_tbl + rgn_idx;
> + srgn = rgn->srgn_tbl + srgn_idx;
> +
> + if (!ufshpb_is_valid_srgn(rgn, srgn))
> + goto mctx_error;
> +
> + if (!srgn->mctx)
> + goto mctx_error;
> +
> + copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
> + pre_req->wb.len - offset,
> + &addr[offset]);
> +
> + if (copied < 0)
> + goto mctx_error;
> +
> + offset += copied;
> + srgn_offset += copied;
> +
> + if (srgn_offset == hpb->entries_per_srgn) {
> + srgn_offset = 0;
> +
> + if (++srgn_idx == hpb->srgns_per_rgn) {
> + srgn_idx = 0;
> + rgn_idx++;
> + }
> + }
> +
> + if (offset < pre_req->wb.len)
> + goto next_offset;
> +
> + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> + return 0;
> +mctx_error:
> + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> + return -ENOMEM;
> +}
> +
> +static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
> + struct request_queue *q,
> + struct ufshpb_req *pre_req)
> +{
> + struct page *page = pre_req->wb.m_page;
> + struct bio *bio = pre_req->bio;
> + int entries_bytes, ret;
> +
> + if (!page)
> + return -ENOMEM;
> +
> + if (ufshpb_prep_entry(pre_req, page))
> + return -ENOMEM;
> +
> + entries_bytes = pre_req->wb.len * sizeof(u64);
> +
> + ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
> + if (ret != entries_bytes) {
> + dev_err(&hpb->sdev_ufs_lu->sdev_dev,
> + "bio_add_pc_page fail: %d", ret);
> + return -ENOMEM;
> + }
> + return 0;
> +}
> +
> +static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
> +{
> + if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
> + hpb->cur_read_id = 1;
> + return hpb->cur_read_id;
> +}
> +
> +static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
> + struct ufshpb_req *pre_req, int read_id)
> +{
> + struct scsi_device *sdev = cmd->device;
> + struct request_queue *q = sdev->request_queue;
> + struct request *req;
> + struct scsi_request *rq;
> + struct bio *bio = pre_req->bio;
> +
> + pre_req->hpb = hpb;
> + pre_req->wb.lpn = sectors_to_logical(cmd->device,
> + blk_rq_pos(cmd->request));
> + pre_req->wb.len = sectors_to_logical(cmd->device,
> + blk_rq_sectors(cmd->request));
> + if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
> + return -ENOMEM;
> +
> + req = pre_req->req;
> +
> + /* 1. request setup */
> + blk_rq_append_bio(req, &bio);
> + req->rq_disk = NULL;
> + req->end_io_data = (void *)pre_req;
> + req->end_io = ufshpb_pre_req_compl_fn;
> +
> + /* 2. scsi_request setup */
> + rq = scsi_req(req);
> + rq->retries = 1;
> +
> + ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
> + read_id);
> + rq->cmd_len = scsi_command_size(rq->cmd);
> +
> + if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
> + return -EAGAIN;
> +
> + hpb->stats.pre_req_cnt++;
> +
> + return 0;
> +}
> +
> +static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
> + int *read_id)
> +{
> + struct ufshpb_req *pre_req;
> + struct request *req = NULL;
> + unsigned long flags;
> + int _read_id;
> + int ret = 0;
> +
> + req = blk_get_request(cmd->device->request_queue,
> + REQ_OP_SCSI_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
> + if (IS_ERR(req))
> + return -EAGAIN;
> +
> + spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> + pre_req = ufshpb_get_pre_req(hpb);
> + if (!pre_req) {
> + ret = -EAGAIN;
> + goto unlock_out;
> + }
> + _read_id = ufshpb_get_read_id(hpb);
> + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> +
> + pre_req->req = req;
> +
> + ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
> + if (ret)
> + goto free_pre_req;
> +
> + *read_id = _read_id;
> +
> + return ret;
> +free_pre_req:
> + spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> + ufshpb_put_pre_req(hpb, pre_req);
> +unlock_out:
> + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> + blk_put_request(req);
> + return ret;
> +}
> +
> /*
> * This function will set up HPB read command using host-side L2P map data.
> - * In HPB v1.0, maximum size of HPB read command is 4KB.
> */
> -void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> {
> struct ufshpb_lu *hpb;
> struct ufshpb_region *rgn;
> @@ -291,19 +552,20 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> u64 ppn;
> unsigned long flags;
> int transfer_len, rgn_idx, srgn_idx, srgn_offset;
> + int read_id = 0;
> int err = 0;
>
> hpb = ufshpb_get_hpb_data(cmd->device);
> if (!hpb)
> - return;
> + return -ENODEV;
>
> if (ufshpb_get_state(hpb) == HPB_INIT)
> - return;
> + return -ENODEV;
>
> if (ufshpb_get_state(hpb) != HPB_PRESENT) {
> dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
> "%s: ufshpb state is not PRESENT", __func__);
> - return;
> + return -ENODEV;
> }
>
> if (blk_rq_is_scsi(cmd->request) ||
> @@ -314,7 +576,7 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> transfer_len = sectors_to_logical(cmd->device,
> blk_rq_sectors(cmd->request));
> if (unlikely(!transfer_len))
> - return;
> + return 0;
>
> lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
> ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
> @@ -327,18 +589,18 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
> transfer_len);
> spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> - return;
> + return 0;
> }
>
> - if (!ufshpb_is_support_chunk(transfer_len))
> - return;
> + if (!ufshpb_is_support_chunk(hpb, transfer_len))
> + return 0;
>
> spin_lock_irqsave(&hpb->rgn_state_lock, flags);
> if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
> transfer_len)) {
> hpb->stats.miss_cnt++;
> spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
> - return;
> + return 0;
> }
>
> err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
> @@ -351,64 +613,101 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> * active state.
> */
> dev_err(hba->dev, "get ppn failed. err %d\n", err);
> - return;
> + return err;
> + }
> + if (!ufshpb_is_legacy(hba) &&
> + ufshpb_is_required_wb(hpb, transfer_len)) {
> + err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
> + if (err) {
> + unsigned long timeout;
> +
> + timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
> + hpb->params.requeue_timeout_ms);
> +
> + if (time_before(jiffies, timeout))
> + return -EAGAIN;
> +
> + hpb->stats.miss_cnt++;
> + return 0;
> + }
> }
>
> - ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len);
> + ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len, read_id);
>
> hpb->stats.hit_cnt++;
> + return 0;
> }
> -static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
> - struct ufshpb_subregion *srgn)
> +
> +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
> + int rgn_idx, enum req_opf dir,
> + bool atomic)
You didn't mention this change in the cover letter. And I don't see
anyone passing "atomic" as true, neither in your patches nor in Avri's
V6 series (from ufshpb_issue_umap_single_req()). If no one is using the
flag, then this is dead code. If Avri needs this flag, he can add it in
his host control mode patches. Am I missing anything?
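To illustrate, the only kind of user that would justify the flag is
something along these lines, which as far as I can tell exists in
neither series (purely hypothetical code, sketched against the
signature above):

	/* hypothetical host-control-mode caller in atomic context */
	umap_req = ufshpb_get_req(hpb, rgn->rgn_idx, REQ_OP_SCSI_OUT, true);

Until such a caller exists, "atomic" is always false.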