Re: [PATCH v3 2/2] virtio-blk: support mq_ops->queue_rqs()

On Mon, Mar 28, 2022 at 02:16:13PM +0100, Stefan Hajnoczi wrote:
> On Thu, Mar 24, 2022 at 11:04:50PM +0900, Suwan Kim wrote:
> > @@ -367,6 +381,66 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
> >  	return BLK_STS_OK;
> >  }
> >  
> > +static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
> > +{
> > +	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
> > +	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
> > +	unsigned long flags;
> > +	int num, err;
> > +
> > +	req->mq_hctx->tags->rqs[req->tag] = req;
> > +
> > +	if (virtblk_prep_rq(req->mq_hctx, vblk, req, vbr, &num) != BLK_STS_OK)
> > +		return false;
> > +
> > +	spin_lock_irqsave(&vq->lock, flags);
> > +	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
> > +	if (err) {
> > +		spin_unlock_irqrestore(&vq->lock, flags);
> > +		virtblk_unmap_data(req, vbr);
> > +		virtblk_cleanup_cmd(req);
> > +		return false;
> > +	}
> > +	spin_unlock_irqrestore(&vq->lock, flags);
> 
> Simplification:
> 
>   spin_lock_irqsave(&vq->lock, flags);
>   err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
>   spin_unlock_irqrestore(&vq->lock, flags);
>   if (err) {
>       virtblk_unmap_data(req, vbr);
>       virtblk_cleanup_cmd(req);
>       return false;
>   }
> 

Thanks! I will fix it.

> > +
> > +	return true;
> > +}
> > +
> > +static void virtio_queue_rqs(struct request **rqlist)
> > +{
> > +	struct request *req, *next, *prev = NULL;
> > +	struct request *requeue_list = NULL;
> > +
> > +	rq_list_for_each_safe(rqlist, req, next) {
> > +		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
> > +		unsigned long flags;
> > +		bool kick;
> > +
> > +		if (!virtblk_prep_rq_batch(vq, req)) {
> > +			rq_list_move(rqlist, &requeue_list, req, prev);
> > +			req = prev;
> > +
> > +			if (!req)
> > +				continue;
> > +		}
> > +
> > +		if (!next || req->mq_hctx != next->mq_hctx) {
> > +			spin_lock_irqsave(&vq->lock, flags);
> 
> Did you try calling virtblk_add_req() here to avoid acquiring and
> releasing the lock multiple times? In other words, do virtblk_prep_rq()
> but wait until we get here to do virtblk_add_req().
> 
> I don't know if it has any measurable effect on performance or maybe the
> code would become too complex, but I noticed that we're not fully
> exploiting batching.

I tried it as you suggested: I deferred virtblk_add_req() and added
the requests in rqlist to the virtqueue inside this if statement,
holding the lock only once.

The code is attached at the end of this mail; please refer to it.

But I didn't see an improvement. It performed slightly worse than
the current patch.
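For clarity, the locking difference is roughly this (a sketch only;
error handling and unmap/cleanup elided):

-----------
/* v3 patch: lock/unlock per request, in virtblk_prep_rq_batch() */
spin_lock_irqsave(&vq->lock, flags);
err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, num);
spin_unlock_irqrestore(&vq->lock, flags);

/* attached version: lock once per batch, in virtblk_add_req_batch() */
spin_lock_irqsave(&vq->lock, flags);
while (!rq_list_empty(*rqlist)) {
	struct request *req = rq_list_pop(rqlist);
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	err = virtblk_add_req(vq->vq, vbr, vbr->sg_table.sgl, vbr->sg_num);
}
kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->lock, flags);
-----------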

> > +			kick = virtqueue_kick_prepare(vq->vq);
> > +			spin_unlock_irqrestore(&vq->lock, flags);
> > +			if (kick)
> > +				virtqueue_notify(vq->vq);
> > +
> > +			req->rq_next = NULL;

Are you asking about this part?

> > +			*rqlist = next;
> > +			prev = NULL;
> > +		} else
> > +			prev = req;
> 
> What guarantees that req is still alive after we called
> virtblk_add_req()? The device may have seen it and completed it already
> by the time we get here.

Isn't the request completed after the kick?

If you were asking about "req->rq_next = NULL",
I think it should be placed before
"kick = virtqueue_kick_prepare(vq->vq)", like this:

-----------
	req->rq_next = NULL;
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);
	if (kick)
		virtqueue_notify(vq->vq);
-----------

Regards,
Suwan Kim

---

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2218cab39c72..d972d3042068 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -92,6 +92,7 @@ struct virtio_blk {
 struct virtblk_req {
 	struct virtio_blk_outhdr out_hdr;
 	u8 status;
+	int sg_num;
 	struct sg_table sg_table;
 	struct scatterlist sg[];
 };
@@ -311,18 +312,13 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
 		virtqueue_notify(vq->vq);
 }

-static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
-			   const struct blk_mq_queue_data *bd)
+static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
+					struct virtio_blk *vblk,
+					struct request *req,
+					struct virtblk_req *vbr)
 {
-	struct virtio_blk *vblk = hctx->queue->queuedata;
-	struct request *req = bd->rq;
-	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int num;
-	int qid = hctx->queue_num;
-	bool notify = false;
 	blk_status_t status;
-	int err;
+	int num;

 	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
 	if (unlikely(status))
@@ -335,9 +331,30 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		virtblk_cleanup_cmd(req);
 		return BLK_STS_RESOURCE;
 	}
+	vbr->sg_num = num;
+
+	return BLK_STS_OK;
+}
+
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+			   const struct blk_mq_queue_data *bd)
+{
+	struct virtio_blk *vblk = hctx->queue->queuedata;
+	struct request *req = bd->rq;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+	unsigned long flags;
+	int qid = hctx->queue_num;
+	bool notify = false;
+	blk_status_t status;
+	int err;
+
+	status = virtblk_prep_rq(hctx, vblk, req, vbr);
+	if (unlikely(status))
+		return status;

 	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
-	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
+	err = virtblk_add_req(vblk->vqs[qid].vq, vbr,
+				vbr->sg_table.sgl, vbr->sg_num);
 	if (err) {
 		virtqueue_kick(vblk->vqs[qid].vq);
 		/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -367,6 +384,76 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }

+static bool virtblk_prep_rq_batch(struct virtio_blk_vq *vq, struct request *req)
+{
+	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	req->mq_hctx->tags->rqs[req->tag] = req;
+
+	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
+}
+
+static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
+					struct request **rqlist,
+					struct request **requeue_list)
+{
+	unsigned long flags;
+	int err;
+	bool kick;
+
+	spin_lock_irqsave(&vq->lock, flags);
+	while (!rq_list_empty(*rqlist)) {
+		struct request *req = rq_list_pop(rqlist);
+		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+		err = virtblk_add_req(vq->vq, vbr,
+					vbr->sg_table.sgl, vbr->sg_num);
+		if (err) {
+			virtblk_unmap_data(req, vbr);
+			virtblk_cleanup_cmd(req);
+			rq_list_add(requeue_list, req);
+		}
+	}
+
+	kick = virtqueue_kick_prepare(vq->vq);
+	spin_unlock_irqrestore(&vq->lock, flags);
+
+	return kick;
+}
+
+static void virtio_queue_rqs(struct request **rqlist)
+{
+	struct request *req, *next, *prev = NULL;
+	struct request *requeue_list = NULL;
+
+	rq_list_for_each_safe(rqlist, req, next) {
+		struct virtio_blk_vq *vq = req->mq_hctx->driver_data;
+		bool kick;
+
+		if (!virtblk_prep_rq_batch(vq, req)) {
+			rq_list_move(rqlist, &requeue_list, req, prev);
+			req = prev;
+
+			if (!req)
+				continue;
+		}
+
+		if (!next || req->mq_hctx != next->mq_hctx) {
+			kick = virtblk_add_req_batch(vq, rqlist, &requeue_list);
+			if (kick)
+				virtqueue_notify(vq->vq);
+
+			req->rq_next = NULL;
+			*rqlist = next;
+			prev = NULL;
+		} else
+			prev = req;
+	}
+
+	*rqlist = requeue_list;
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -818,6 +905,7 @@ static int virtblk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,

 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
+	.queue_rqs	= virtio_queue_rqs,
 	.commit_rqs	= virtio_commit_rqs,
 	.init_hctx	= virtblk_init_hctx,
 	.complete	= virtblk_request_done,



