Add support for handling the nvme_cmd_copy command on the target.

For a bdev-ns we call into blkdev_issue_copy(), which the block layer
completes either by offloading the copy request to the backing bdev or
by emulating it. For a file-ns we call vfs_copy_file_range() to service
the request.

Currently the target always advertises copy capability by setting
NVME_CTRL_ONCS_COPY in the controller ONCS field. The loop target has
copy support, which can be used to test copy offload.

Signed-off-by: Nitesh Shetty <nj.shetty@xxxxxxxxxxx>
Signed-off-by: Anuj Gupta <anuj20.g@xxxxxxxxxxx>
---
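Notes (not part of the commit message): to exercise the copy path over the
loop target, a user-space sketch along the lines below can issue a Copy
through the passthrough ioctl. Treat it as an untested illustration: the
descriptor layout and CDW encoding follow the NVMe 2.0 Copy definition
(descriptor format 0), and the device node, nsid, and LBAs are made-up
placeholders.

/*
 * Issue a single-range NVMe Copy (opcode 0x19) via the kernel passthrough
 * ioctl. Assumes a little-endian host; all values are example placeholders.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

struct copy_desc_fmt0 {			/* 32-byte source range entry */
	uint64_t rsvd;
	uint64_t slba;			/* starting source LBA */
	uint16_t nlb;			/* number of blocks, 0's based */
	uint8_t  rsvd2[14];
};

int main(void)
{
	/* copy LBAs 0..7 to LBA 0x100 */
	struct copy_desc_fmt0 range = { .slba = 0, .nlb = 7 };
	struct nvme_passthru_cmd cmd = {
		.opcode   = 0x19,	/* nvme_cmd_copy */
		.nsid     = 1,
		.addr     = (uintptr_t)&range,
		.data_len = sizeof(range),
		.cdw10    = 0x100,	/* SDLBA, lower 32 bits */
		.cdw11    = 0,		/* SDLBA, upper 32 bits */
		.cdw12    = 0,		/* NR = 0 (one range), format 0 */
	};
	int fd = open("/dev/nvme0n1", O_RDWR);

	if (fd < 0 || ioctl(fd, NVME_IOCTL_IO_CMD, &cmd) != 0)
		perror("nvme copy");
	return 0;
}

On success the bdev path below completes the command with CQE DW0 set to 1
(see nvmet_bdev_copy_end_io()).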
 drivers/nvme/target/admin-cmd.c   |  9 ++++-
 drivers/nvme/target/io-cmd-bdev.c | 62 +++++++++++++++++++++++++++++++
 drivers/nvme/target/io-cmd-file.c | 52 ++++++++++++++++++++++++++
 drivers/nvme/target/loop.c        |  6 +++
 drivers/nvme/target/nvmet.h       |  1 +
 5 files changed, 128 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..8e644b8ec0fd 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -433,8 +433,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
-			NVME_CTRL_ONCS_WRITE_ZEROES);
-
+			NVME_CTRL_ONCS_WRITE_ZEROES | NVME_CTRL_ONCS_COPY);
 	/* XXX: don't report vwc if the underlying device is write through */
 	id->vwc = NVME_CTRL_VWC_PRESENT;
 
@@ -536,6 +535,12 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
 	if (req->ns->bdev)
 		nvmet_bdev_set_limits(req->ns->bdev, id);
+	else {
+		id->msrc = (__force u8)to0based(BIO_MAX_VECS - 1);
+		id->mssrl = cpu_to_le16(BIO_MAX_VECS <<
+				(PAGE_SHIFT - SECTOR_SHIFT));
+		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+	}
 
 	/*
 	 * We just provide a single LBA format that matches what the
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c2d6cea0236b..d92dfe86c647 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,18 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
 	id->npda = id->npdg;
 	/* NOWS = Namespace Optimal Write Size */
 	id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+	if (bdev_max_copy_sectors(bdev)) {
+		id->msrc = (__force u8)to0based(1);	/* one range per Copy */
+		id->mssrl = cpu_to_le16((bdev_max_copy_sectors(bdev) <<
+				SECTOR_SHIFT) / bdev_logical_block_size(bdev));
+		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+	} else {
+		id->msrc = (__force u8)to0based(BIO_MAX_VECS - 1);
+		id->mssrl = cpu_to_le16((BIO_MAX_VECS << PAGE_SHIFT) /
+				bdev_logical_block_size(bdev));
+		id->mcl = cpu_to_le32(le16_to_cpu(id->mssrl));
+	}
 }
 
 void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -184,6 +196,21 @@ static void nvmet_bio_done(struct bio *bio)
 	nvmet_req_bio_put(req, bio);
 }
 
+static void nvmet_bdev_copy_end_io(void *private, int comp_len)
+{
+	struct nvmet_req *req = (struct nvmet_req *)private;
+	u16 status;
+
+	if (comp_len == req->copy_len) {
+		req->cqe->result.u32 = cpu_to_le32(1);
+		status = errno_to_nvme_status(req, 0);
+	} else {
+		req->cqe->result.u32 = cpu_to_le32(0);
+		status = blk_to_nvme_status(req, BLK_STS_IOERR);
+	}
+	nvmet_req_complete(req, status);
+}
+
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
 		struct sg_mapping_iter *miter)
@@ -450,6 +477,37 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	}
 }
 
+/* At present we handle only one range entry: since copy offload is aligned
+ * with copy_file_range(), only one entry is passed down from the block layer.
+ */
+static void nvmet_bdev_execute_copy(struct nvmet_req *req)
+{
+	struct nvme_copy_range range;
+	struct nvme_command *cmd = req->cmd;
+	int ret;
+	u16 status;
+
+	status = nvmet_copy_from_sgl(req, 0, &range, sizeof(range));
+	if (status)
+		goto out;
+
+	req->copy_len = (le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift;
+	ret = blkdev_issue_copy(req->ns->bdev,
+			le64_to_cpu(cmd->copy.sdlba) << req->ns->blksize_shift,
+			req->ns->bdev,
+			le64_to_cpu(range.slba) << req->ns->blksize_shift,
+			req->copy_len,
+			nvmet_bdev_copy_end_io, (void *)req, GFP_KERNEL);
+	if (ret) {
+		req->cqe->result.u32 = cpu_to_le32(0);
+		status = blk_to_nvme_status(req, BLK_STS_IOERR);
+		goto out;
+	}
+
+	return;
+out:
+	nvmet_req_complete(req, status);
+}
+
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 {
 	switch (req->cmd->common.opcode) {
@@ -468,6 +526,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
 		return 0;
+	case nvme_cmd_copy:
+		req->execute = nvmet_bdev_execute_copy;
+		return 0;
+
 	default:
 		return nvmet_report_invalid_opcode(req);
 	}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 2d068439b129..f61aa834f7a5 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -322,6 +322,49 @@ static void nvmet_file_dsm_work(struct work_struct *w)
 	}
 }
 
+static void nvmet_file_copy_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
+	int nr_range = req->cmd->copy.nr_range + 1;
+	u16 status = 0;
+	int id;
+	ssize_t len, ret;
+	loff_t pos, src;
+
+	pos = le64_to_cpu(req->cmd->copy.sdlba) << req->ns->blksize_shift;
+	if (unlikely(pos + req->transfer_len > req->ns->size)) {
+		nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+		return;
+	}
+
+	for (id = 0; id < nr_range; id++) {
+		struct nvme_copy_range range;
+
+		status = nvmet_copy_from_sgl(req, id * sizeof(range), &range,
+					sizeof(range));
+		if (status)
+			goto out;
+
+		src = le64_to_cpu(range.slba) << req->ns->blksize_shift;
+		len = (le16_to_cpu(range.nlb) + 1) << req->ns->blksize_shift;
+		ret = vfs_copy_file_range(req->ns->file, src, req->ns->file,
+					pos, len, 0);
+		if (ret != len) {
+			req->cqe->result.u32 = cpu_to_le32(id);
+			if (ret < 0)
+				status = errno_to_nvme_status(req, ret);
+			else
+				status = errno_to_nvme_status(req, -EIO);
+			goto out;
+		}
+		pos += len;
+	}
+
+out:
+	nvmet_req_complete(req, status);
+}
+
 static void nvmet_file_execute_dsm(struct nvmet_req *req)
 {
 	if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))
@@ -330,6 +373,12 @@ static void nvmet_file_execute_dsm(struct nvmet_req *req)
 	queue_work(nvmet_wq, &req->f.work);
 }
 
+static void nvmet_file_execute_copy(struct nvmet_req *req)
+{
+	INIT_WORK(&req->f.work, nvmet_file_copy_work);
+	queue_work(nvmet_wq, &req->f.work);
+}
+
 static void nvmet_file_write_zeroes_work(struct work_struct *w)
 {
 	struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
@@ -376,6 +425,9 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_file_execute_write_zeroes;
 		return 0;
+	case nvme_cmd_copy:
+		req->execute = nvmet_file_execute_copy;
+		return 0;
 	default:
 		return nvmet_report_invalid_opcode(req);
 	}
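(Worked example for the LBA math in nvmet_file_copy_work() above: on a
4 KiB-block file-ns, i.e. blksize_shift = 12, a Copy with SDLBA = 0x100 and
a single source range {SLBA = 0, NLB = 7} turns into
vfs_copy_file_range(file, 0 << 12, file, 0x100 << 12, 8 << 12, 0), that is,
a 32 KiB copy from byte 0 to byte offset 1 MiB.)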
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f2d24b2d992f..d18ed8067a15 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -146,6 +146,12 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return ret;
 
 	nvme_start_request(req);
+	if (unlikely((req->cmd_flags & REQ_COPY) &&
+				(req_op(req) == REQ_OP_READ))) {
+		blk_mq_set_request_complete(req);
+		blk_mq_end_request(req, BLK_STS_OK);
+		return BLK_STS_OK;
+	}
 	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
 	iod->req.port = queue->ctrl->port;
 	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index dc60a22646f7..1615dc9194ba 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -393,6 +393,7 @@ struct nvmet_req {
 	struct device *p2p_client;
 	u16 error_loc;
 	u64 error_slba;
+	size_t copy_len;
 };
 
 #define NVMET_MAX_MPOOL_BVEC	16
-- 
2.35.1.500.gb896f729e2
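(For reference, with 4 KiB pages and 512-byte sectors the file-ns limits
advertised in nvmet_execute_identify_ns() evaluate to MSRC =
to0based(BIO_MAX_VECS - 1) = 254 (0's based, i.e. 255 source ranges),
MSSRL = BIO_MAX_VECS << (PAGE_SHIFT - SECTOR_SHIFT) = 256 << 3 = 2048
blocks per range, and MCL = MSSRL = 2048 blocks; the numbers scale with
the architecture's page size.)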