On 10/13/21 9:16 AM, Christoph Hellwig wrote:
> On Wed, Oct 13, 2021 at 09:10:01AM -0600, Jens Axboe wrote:
>>> Also - can you look into turning this logic into an inline function with
>>> a callback for the driver? I think in general gcc will avoid the
>>> indirect call for function pointers passed directly. That way we can
>>> keep a nice code structure but also avoid the indirections.
>>>
>>> Same for nvme_pci_complete_batch.
>>
>> Not sure I follow. It's hard to do a generic callback for this, as the
>> batch can live outside the block layer through the plug. That's why
>> it's passed the way it is in terms of completion hooks.
>
> Basically turn nvme_pci_complete_batch into a core nvme helper (inline)
> with nvme_pci_unmap_rq passed as a function pointer that gets inlined.

Something like this?

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0ac7bad405ef..1aff0ca37063 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -346,15 +346,19 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
 	return RETRY;
 }
 
-static inline void nvme_end_req(struct request *req)
+static inline void nvme_end_req_zoned(struct request *req)
 {
-	blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
 	    req_op(req) == REQ_OP_ZONE_APPEND)
 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
 			le64_to_cpu(nvme_req(req)->result.u64));
+}
+
+static inline void nvme_end_req(struct request *req)
+{
+	blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
+	nvme_end_req_zoned(req);
 	nvme_trace_bio_complete(req);
 	blk_mq_end_request(req, status);
 }
@@ -381,6 +385,23 @@ void nvme_complete_rq(struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
+void nvme_complete_batch(struct io_batch *iob, void (*fn)(struct request *rq))
+{
+	struct request *req;
+
+	req = rq_list_peek(&iob->req_list);
+	while (req) {
+		fn(req);
+		nvme_cleanup_cmd(req);
+		nvme_end_req_zoned(req);
+		req->status = BLK_STS_OK;
+		req = rq_list_next(req);
+	}
+
+	blk_mq_end_request_batch(iob);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_batch);
+
 /*
  * Called to unwind from ->queue_rq on a failed command submission so that the
  * multipathing code gets called to potentially failover to another path.
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ed79a6c7e804..b73a573472d9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -638,6 +638,7 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
 }
 
 void nvme_complete_rq(struct request *req);
+void nvme_complete_batch(struct io_batch *iob, void (*fn)(struct request *));
 blk_status_t nvme_host_path_error(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b8dbee47fced..e79c0f0268b3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -992,22 +992,7 @@ static void nvme_pci_complete_rq(struct request *req)
 
 static void nvme_pci_complete_batch(struct io_batch *iob)
 {
-	struct request *req;
-
-	req = rq_list_peek(&iob->req_list);
-	while (req) {
-		nvme_pci_unmap_rq(req);
-		if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
-			nvme_cleanup_cmd(req);
-		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-		    req_op(req) == REQ_OP_ZONE_APPEND)
-			req->__sector = nvme_lba_to_sect(req->q->queuedata,
-				le64_to_cpu(nvme_req(req)->result.u64));
-		req->status = BLK_STS_OK;
-		req = rq_list_next(req);
-	}
-
-	blk_mq_end_request_batch(iob);
+	nvme_complete_batch(iob, nvme_pci_unmap_rq);
 }
 
 /* We read the CQE phase first to check if the rest of the entry is valid */

-- 
Jens Axboe
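
For readers following the inlining argument above: a minimal, standalone userspace
C sketch of the pattern Christoph describes is shown below. It is not part of the
patch; the names (struct item, struct batch, complete_batch, driver_unmap,
driver_complete_batch) are made up for illustration. The point is that when the
per-driver hook is passed as a constant function pointer to an always-inline shared
helper, the compiler can typically turn the "indirect" call into a direct one and
inline it, so each driver gets a specialized completion loop without duplicating it.

/*
 * Illustrative sketch only (plain userspace C, not kernel code).
 * complete_batch() plays the role of the shared core helper
 * (nvme_complete_batch in the patch); driver_unmap() and
 * driver_complete_batch() stand in for the per-driver pieces.
 */
#include <stdio.h>

struct item {
	int id;
	struct item *next;
};

struct batch {
	struct item *head;
};

/* Shared completion loop; fn is the per-driver teardown hook. */
static inline __attribute__((always_inline))
void complete_batch(struct batch *b, void (*fn)(struct item *))
{
	struct item *it;

	/*
	 * Because fn is a compile-time constant at each call site of this
	 * always-inline helper, the call below is usually devirtualized
	 * and inlined rather than made through a pointer at runtime.
	 */
	for (it = b->head; it; it = it->next)
		fn(it);
}

/* "Driver" specific hook, analogous in role to nvme_pci_unmap_rq(). */
static void driver_unmap(struct item *it)
{
	printf("unmap item %d\n", it->id);
}

/* Driver entry point, analogous in role to nvme_pci_complete_batch(). */
static void driver_complete_batch(struct batch *b)
{
	complete_batch(b, driver_unmap);
}

int main(void)
{
	struct item b2 = { .id = 2, .next = NULL };
	struct item b1 = { .id = 1, .next = &b2 };
	struct batch b = { .head = &b1 };

	driver_complete_batch(&b);
	return 0;
}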