From: Chaitanya Kulkarni <kch@xxxxxxxxxx>

Not all block devices support the REQ_OP_VERIFY operation onto which
the controller-specific Verify command is mapped. This patch adds a way
to emulate REQ_OP_VERIFY for an NVMeOF block-device namespace: when the
backend device lacks native verify support, the request is offloaded to
a new workqueue (nvmet-verify-wq) that performs the emulation with the
help of blkdev_emulate_verify().

Signed-off-by: Chaitanya Kulkarni <kch@xxxxxxxxxx>
---
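Note: blkdev_emulate_verify() is introduced by an earlier patch in this
series and is not part of this diff. For reviewers' convenience, below
is a minimal sketch of what a read-and-discard emulation helper could
look like; apart from the name and the call-site signature (bdev,
sector, nr_sects, gfp_mask), everything in it is an assumption, not the
series' actual implementation:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>

/*
 * Sketch only: read the range back one page at a time and discard the
 * data, forcing the device to fetch (and thereby check) every block.
 */
static int blkdev_emulate_verify(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects, gfp_t gfp_mask)
{
	struct page *page;
	struct bio *bio;
	int ret = 0;

	page = alloc_page(gfp_mask);
	if (!page)
		return -ENOMEM;

	while (nr_sects) {
		/* verify at most one page worth of sectors per bio */
		unsigned int len = min_t(sector_t, nr_sects,
					 PAGE_SIZE >> SECTOR_SHIFT);

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = sector;
		bio->bi_opf = REQ_OP_READ;
		if (!bio_add_page(bio, page, len << SECTOR_SHIFT, 0)) {
			bio_put(bio);
			ret = -EIO;
			break;
		}

		/* synchronous read; any media error fails the verify */
		ret = submit_bio_wait(bio);
		bio_put(bio);
		if (ret)
			break;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}

	__free_page(page);
	return ret;
}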
 drivers/nvme/target/core.c        | 12 +++++++-
 drivers/nvme/target/io-cmd-bdev.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++-----
 drivers/nvme/target/nvmet.h       |  3 ++
 3 files changed, 57 insertions(+), 9 deletions(-)

diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 8ce4d59cc9e7..8a17a6479073 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -16,6 +16,7 @@
 #include "nvmet.h"
 
 struct workqueue_struct *buffered_io_wq;
+struct workqueue_struct *verify_wq;
 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
 static DEFINE_IDA(cntlid_ida);
 
@@ -1546,11 +1547,17 @@ static int __init nvmet_init(void)
 
 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
 
+	verify_wq = alloc_workqueue("nvmet-verify-wq", WQ_MEM_RECLAIM, 0);
+	if (!verify_wq) {
+		error = -ENOMEM;
+		goto out;
+	}
+
 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
 			WQ_MEM_RECLAIM, 0);
 	if (!buffered_io_wq) {
 		error = -ENOMEM;
-		goto out;
+		goto out_free_verify_work_queue;
 	}
 
 	error = nvmet_init_discovery();
@@ -1566,6 +1573,8 @@ static int __init nvmet_init(void)
 	nvmet_exit_discovery();
out_free_work_queue:
 	destroy_workqueue(buffered_io_wq);
+out_free_verify_work_queue:
+	destroy_workqueue(verify_wq);
 out:
 	return error;
 }
@@ -1576,6 +1585,7 @@ static void __exit nvmet_exit(void)
 	nvmet_exit_discovery();
 	ida_destroy(&cntlid_ida);
 	destroy_workqueue(buffered_io_wq);
+	destroy_workqueue(verify_wq);
 
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 5a888cdadfea..80b8e7bfd1ae 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -433,25 +433,59 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	}
 }
 
-static void nvmet_bdev_execute_verify(struct nvmet_req *req)
+static void __nvmet_req_to_verify_sectors(struct nvmet_req *req,
+		sector_t *sects, sector_t *nr_sects)
 {
 	struct nvme_verify_cmd *verify = &req->cmd->verify;
+
+	*sects = le64_to_cpu(verify->slba) << (req->ns->blksize_shift - 9);
+	*nr_sects = (((sector_t)le16_to_cpu(verify->length) + 1) <<
+			(req->ns->blksize_shift - 9));
+}
+
+static void nvmet_bdev_emulate_verify_work(struct work_struct *w)
+{
+	struct nvmet_req *req = container_of(w, struct nvmet_req, b.work);
+	sector_t nr_sector;
+	sector_t sector;
+	int ret = 0;
+
+	__nvmet_req_to_verify_sectors(req, &sector, &nr_sector);
+	if (!nr_sector)
+		goto out;
+
+	ret = blkdev_emulate_verify(req->ns->bdev, sector, nr_sector,
+			GFP_KERNEL);
+out:
+	nvmet_req_complete(req,
+			blk_to_nvme_status(req, errno_to_blk_status(ret)));
+}
+
+static void nvmet_bdev_submit_emulate_verify(struct nvmet_req *req)
+{
+	INIT_WORK(&req->b.work, nvmet_bdev_emulate_verify_work);
+	queue_work(verify_wq, &req->b.work);
+}
+
+static void nvmet_bdev_execute_verify(struct nvmet_req *req)
+{
 	struct bio *bio = NULL;
 	sector_t nr_sector;
 	sector_t sector;
-	int ret;
+	int ret = 0;
 
 	if (!nvmet_check_transfer_len(req, 0))
 		return;
 
+	/* device lacks native verify support, offload emulation */
 	if (!bdev_verify_sectors(req->ns->bdev)) {
-		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		nvmet_bdev_submit_emulate_verify(req);
 		return;
 	}
 
-	sector = le64_to_cpu(verify->slba) << (req->ns->blksize_shift - 9);
-	nr_sector = (((sector_t)le16_to_cpu(verify->length) + 1) <<
-			(req->ns->blksize_shift - 9));
+	__nvmet_req_to_verify_sectors(req, &sector, &nr_sector);
+	if (!nr_sector)
+		goto out;
 
 	ret = __blkdev_issue_verify(req->ns->bdev, sector, nr_sector,
 			GFP_KERNEL, &bio);
@@ -459,9 +493,10 @@ static void nvmet_bdev_execute_verify(struct nvmet_req *req)
 		bio->bi_private = req;
 		bio->bi_end_io = nvmet_bio_done;
 		submit_bio(bio);
-	} else {
-		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+		return;
 	}
+out:
+	nvmet_req_complete(req, errno_to_nvme_status(req, ret));
 }
 
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 8776dd1a0490..7f3f584b1e7b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -323,6 +323,8 @@ struct nvmet_req {
 	union {
 		struct {
 			struct bio      inline_bio;
+			/* XXX: should we take work out of union ? */
+			struct work_struct	work;
 		} b;
 		struct {
 			bool			mpool_alloc;
@@ -355,6 +357,7 @@ struct nvmet_req {
 };
 
 extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *verify_wq;
 
 static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
 {
-- 
2.22.1