Now that nvmet_req_execute does nothing, open code it.

Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@xxxxxxx>
Signed-off-by: Dakshaja Uppalapati <dakshaja@xxxxxxxxxxx>
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
[split patch, update changelog]
Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
Signed-off-by: Keith Busch <kbusch@xxxxxxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 drivers/nvme/target/core.c  | 6 ------
 drivers/nvme/target/fc.c    | 4 ++--
 drivers/nvme/target/loop.c  | 2 +-
 drivers/nvme/target/nvmet.h | 1 -
 drivers/nvme/target/rdma.c  | 4 ++--
 drivers/nvme/target/tcp.c   | 6 +++---
 6 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f45cd324eafb..03b56ed7eca0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -943,12 +943,6 @@ bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len)
 }
 EXPORT_SYMBOL_GPL(nvmet_check_data_len);
 
-void nvmet_req_execute(struct nvmet_req *req)
-{
-	req->execute(req);
-}
-EXPORT_SYMBOL_GPL(nvmet_req_execute);
-
 int nvmet_req_alloc_sgl(struct nvmet_req *req)
 {
 	struct pci_dev *p2p_dev = NULL;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ce8d819f86cc..7f9c11138b93 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2015,7 +2015,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
 		}
 
 		/* data transfer complete, resume with nvmet layer */
-		nvmet_req_execute(&fod->req);
+		fod->req.execute(&fod->req);
 		break;
 
 	case NVMET_FCOP_READDATA:
@@ -2231,7 +2231,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
 	 * can invoke the nvmet_layer now. If read data, cmd completion will
 	 * push the data
 	 */
-	nvmet_req_execute(&fod->req);
+	fod->req.execute(&fod->req);
 	return;
 
 transport_error:
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 11f5aea97d1b..f4b878b113f7 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -126,7 +126,7 @@ static void nvme_loop_execute_work(struct work_struct *work)
 	struct nvme_loop_iod *iod =
 		container_of(work, struct nvme_loop_iod, work);
 
-	nvmet_req_execute(&iod->req);
+	iod->req.execute(&iod->req);
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ff55f1005b35..46df45e837c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -374,7 +374,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
 void nvmet_req_uninit(struct nvmet_req *req);
 bool nvmet_check_data_len(struct nvmet_req *req, size_t data_len);
-void nvmet_req_execute(struct nvmet_req *req);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgl(struct nvmet_req *req);
 void nvmet_req_free_sgl(struct nvmet_req *req);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 36d906a7f70d..248e68da2e13 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -603,7 +603,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	nvmet_req_execute(&rsp->req);
+	rsp->req.execute(&rsp->req);
 }
 
 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
@@ -746,7 +746,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 				queue->cm_id->port_num, &rsp->read_cqe, NULL))
 			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
 	} else {
-		nvmet_req_execute(&rsp->req);
+		rsp->req.execute(&rsp->req);
 	}
 
 	return true;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 22014e76d771..72777d558f1c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -938,7 +938,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
 		goto out;
 	}
 
-	nvmet_req_execute(&queue->cmd->req);
+	queue->cmd->req.execute(&queue->cmd->req);
 out:
 	nvmet_prepare_receive_pdu(queue);
 	return ret;
@@ -1058,7 +1058,7 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
 			nvmet_tcp_prep_recv_ddgst(cmd);
 			return 0;
 		}
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	}
 
 	nvmet_prepare_receive_pdu(queue);
@@ -1098,7 +1098,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
 	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
 	    cmd->rbytes_done == cmd->req.transfer_len)
-		nvmet_req_execute(&cmd->req);
+		cmd->req.execute(&cmd->req);
 	ret = 0;
 out:
 	nvmet_prepare_receive_pdu(queue);
-- 
2.18.0.232.gb7bd9486b.dirty
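
For readers who want a concrete picture of the refactor, here is a minimal
standalone sketch (hypothetical names and types, not part of the patch) of what
open coding the wrapper means: callers invoke the ->execute function pointer
directly instead of going through a one-line forwarding helper.

#include <stdio.h>

/* Stand-in for struct nvmet_req: just the function pointer we care about. */
struct req {
	void (*execute)(struct req *req);
};

/* The old-style wrapper: it does nothing beyond forwarding the call. */
static void req_execute(struct req *req)
{
	req->execute(req);
}

static void do_work(struct req *req)
{
	printf("executing request %p\n", (void *)req);
}

int main(void)
{
	struct req r = { .execute = do_work };

	req_execute(&r);	/* before: call through the wrapper */
	r.execute(&r);		/* after: open coded at the call site */
	return 0;
}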