Re: [PATCH 6/9] nvme: add support for batched completion of polled IO

On Wed, Oct 13, 2021 at 10:54:13AM -0600, Jens Axboe wrote:
> +void nvme_complete_batch_req(struct request *req)
> +{
> +	nvme_cleanup_cmd(req);
> +	nvme_end_req_zoned(req);
> +	req->status = BLK_STS_OK;
> +}
> +EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
> +

I'd be tempted to just merge this helper into its only caller.
nvme_cleanup_cmd is exported anyway, so the only new export needed
would be nvme_end_req_zoned.

> +static __always_inline void nvme_complete_batch(struct io_batch *iob,
> +						void (*fn)(struct request *rq))
> +{
> +	struct request *req;
> +
> +	req = rq_list_peek(&iob->req_list);
> +	while (req) {
> +		fn(req);
> +		nvme_complete_batch_req(req);
> +		req = rq_list_next(req);
> +	}
> +
> +	blk_mq_end_request_batch(iob);

Can we turn this into a normal for loop?

	for (req = rq_list_peek(&iob->req_list); req; req = rq_list_next(req)) {
		..
	}
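
With the helper open-coded as suggested above, the whole loop would
then read (again untested):

	for (req = rq_list_peek(&iob->req_list); req; req = rq_list_next(req)) {
		fn(req);
		nvme_cleanup_cmd(req);
		nvme_end_req_zoned(req);
		req->status = BLK_STS_OK;
	}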

> +	if (!nvme_try_complete_req(req, cqe->status, cqe->result)) {
> +		/*
> +		 * Do normal inline completion if we don't have a batch
> +		 * list, if we have an end_io handler, or if the status of
> +		 * the request isn't just normal success.
> +		 */
> +		if (!iob || req->end_io || nvme_req(req)->status)
> +			nvme_pci_complete_rq(req);
> +		else
> +			rq_list_add_tail(&iob->req_list, req);
> +	}

The check for whether a request can be batch completed really should
go into a block layer helper.  Something like the incremental patch
below:

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ce69e9666caac..57bef8229bfab 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1034,17 +1034,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 	}
 
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
-	if (!nvme_try_complete_req(req, cqe->status, cqe->result)) {
-		/*
-		 * Do normal inline completion if we don't have a batch
-		 * list, if we have an end_io handler, or if the status of
-		 * the request isn't just normal success.
-		 */
-		if (!iob || req->end_io || nvme_req(req)->status)
-			nvme_pci_complete_rq(req);
-		else
-			rq_list_add_tail(&iob->req_list, req);
-	}
+	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status))
+		nvme_pci_complete_rq(req);
 }
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index aea7d866a34c6..383d887e32f6d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -773,6 +773,19 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
 void blk_mq_end_request_batch(struct io_batch *ib);
 
+/*
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+		 struct io_batch *iob, bool ioerror)
+{
+	if (!iob || req->end_io || ioerror)
+		return false;
+	rq_list_add_tail(&iob->req_list, req);
+	return true;
+}
+
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);


