Re: [PATCH v2 11/20] scsi: ufs: Switch to scsi_(get|put)_internal_cmd()

On Tue, 2021-11-23 at 11:41 -0800, Bart Van Assche wrote:
> On 11/23/21 4:20 AM, Bean Huo wrote:
> > Calling blk_mq_start_request() emits the block layer's "issue" trace
> > event, but there is no paired completion trace event for it.
> > In addition, blk_mq_tag_idle() is not called when the device
> > management request completes; it is only called after the timer
> > expires.
> > 
> > I remember that we used to not allow this kind of LLD-internal
> > command to be attached to the block layer. I now think that was the
> > correct approach.
> 
> Hi Bean,
> 
> How about modifying the block layer such that blk_mq_tag_busy() is not
> called for requests with operation type REQ_OP_DRV_*? I think that
> would allow the blk_mq_start_request() calls to be left out of the UFS
> driver. These are the changes I currently have in mind (on top of this
> patch series):
> 

Hi Bart,

Yes, the patch below would solve these two problems, but it requires
changing block layer code. Why do we have to go to the block layer to
get this tag, and what is the benefit? This is a device management
request. As for the patch recommended by Adrian, I think that is the
right approach.
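
For reference, the REQ_OP_DRV_* check your diff relies on is, as far as
I can tell, roughly the helper below (a simplified sketch of
blk_op_is_passthrough() from include/linux/blk_types.h in this kernel
version, not a verbatim copy):

/*
 * True for driver-private (passthrough) operations such as the UFS
 * device management commands, false for normal reads and writes.
 */
static inline bool blk_op_is_passthrough(unsigned int op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

If I read the diff correctly, requests of type REQ_OP_DRV_IN/OUT would
then never mark the tag set busy, so the blk_mq_tag_idle() concern
would go away for them.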


Kind regards,
Bean

> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 3ab34c4f20da..a7090b509f2d 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -433,6 +433,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
> 
>   static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>   {
> +	const bool is_passthrough = blk_op_is_passthrough(data->cmd_flags);
>   	struct request_queue *q = data->q;
>   	u64 alloc_time_ns = 0;
>   	struct request *rq;
> @@ -455,8 +456,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>   		 * dispatch list. Don't include reserved tags in the
>   		 * limiting, as it isn't useful.
>   		 */
> -		if (!op_is_flush(data->cmd_flags) &&
> -		    !blk_op_is_passthrough(data->cmd_flags) &&
> +		if (!op_is_flush(data->cmd_flags) && !is_passthrough &&
>   		    e->type->ops.limit_depth &&
>   		    !(data->flags & BLK_MQ_REQ_RESERVED))
>   			e->type->ops.limit_depth(data->cmd_flags, data);
> @@ -465,7 +465,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
>   retry:
>   	data->ctx = blk_mq_get_ctx(q);
>   	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
> -	if (!(data->rq_flags & RQF_ELV))
> +	if (!(data->rq_flags & RQF_ELV) && !is_passthrough)
>   		blk_mq_tag_busy(data->hctx);
> 
>   	/*
> @@ -575,10 +575,10 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>   	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
>   	data.ctx = __blk_mq_get_ctx(q, cpu);
> 
> -	if (!q->elevator)
> -		blk_mq_tag_busy(data.hctx);
> -	else
> +	if (q->elevator)
>   		data.rq_flags |= RQF_ELV;
> +	else if (!blk_op_is_passthrough(data.cmd_flags))
> +		blk_mq_tag_busy(data.hctx);
> 
>   	ret = -EWOULDBLOCK;
>   	tag = blk_mq_get_tag(&data);
> @@ -1369,7 +1369,8 @@ static bool __blk_mq_alloc_driver_tag(struct request *rq)
>   	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
>   	int tag;
> 
> -	blk_mq_tag_busy(rq->mq_hctx);
> +	if (!blk_rq_is_passthrough(rq))
> +		blk_mq_tag_busy(rq->mq_hctx);
> 
>   	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
>   		bt = &rq->mq_hctx->tags->breserved_tags;
> diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
> index fcecbc4ee81b..2c9e9c79ca34 100644
> --- a/drivers/scsi/ufs/ufshcd.c
> +++ b/drivers/scsi/ufs/ufshcd.c
> @@ -1360,25 +1360,6 @@ static int ufshcd_devfreq_target(struct device *dev,
>   	return ret;
>   }
> 
> -static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
> -{
> -	int *busy = priv;
> -
> -	WARN_ON_ONCE(reserved);
> -	(*busy)++;
> -	return false;
> -}
> -
> -/* Whether or not any tag is in use by a request that is in progress. */
> -static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
> -{
> -	struct request_queue *q = hba->host->internal_queue;
> -	int busy = 0;
> -
> -	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
> -	return busy;
> -}
> -
>   static int ufshcd_devfreq_get_dev_status(struct device *dev,
>   		struct devfreq_dev_status *stat)
>   {
> @@ -1778,7 +1759,7 @@ static void ufshcd_gate_work(struct work_struct *work)
> 
>   	if (hba->clk_gating.active_reqs
>   		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
> -		|| ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
> +		|| hba->outstanding_reqs || hba->outstanding_tasks
>   		|| hba->active_uic_cmd || hba->uic_async_done)
>   		goto rel_lock;
> 
> @@ -2996,12 +2977,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
>   	req = scsi_cmd_to_rq(scmd);
>   	tag = req->tag;
>   	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
> -	/*
> -	 * Start the request such that blk_mq_tag_idle() is called when the
> -	 * device management request finishes.
> -	 */
> -	blk_mq_start_request(req);
> -
>   	lrbp = &hba->lrb[tag];
>   	WARN_ON(lrbp->cmd);
>   	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
> @@ -6792,12 +6767,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
>   	req = scsi_cmd_to_rq(scmd);
>   	tag = req->tag;
>   	WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
> -	/*
> -	 * Start the request such that blk_mq_tag_idle() is called when the
> -	 * device management request finishes.
> -	 */
> -	blk_mq_start_request(req);
> -
>   	lrbp = &hba->lrb[tag];
>   	WARN_ON(lrbp->cmd);
>   	lrbp->cmd = NULL;
> 
> Thanks,
> 
> Bart.



