From: Israel Rukshin <israelr@xxxxxxxxxxxx>

Create a block IO request for the metadata from the protection SG list.

Signed-off-by: Israel Rukshin <israelr@xxxxxxxxxxxx>
Signed-off-by: Max Gurtovoy <maxg@xxxxxxxxxxxx>
---
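The core of the change is attaching a bio_integrity_payload to each data
bio and filling it from the metadata scatterlist through an
sg_mapping_iter. Below is a minimal sketch of that pattern for the
simpler single-metadata-page case; the helper name
example_attach_metadata and its md_page/md_off parameters are
hypothetical, shown only to illustrate the bio_integrity calls the
patch builds on:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_attach_metadata(struct bio *bio,
				   struct block_device *bdev,
				   struct page *md_page,
				   unsigned int md_off)
{
	struct blk_integrity *bi = bdev_get_integrity(bdev);
	struct bio_integrity_payload *bip;
	unsigned int len;

	/* the device must expose a protection information profile */
	if (!bi)
		return -ENODEV;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	/* one integrity tuple per interval covered by the data bio */
	len = bio_integrity_bytes(bi, bio_sectors(bio));
	bip->bip_iter.bi_size = len;

	/* seed is expressed in integrity-interval units, as in the patch */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
			  (bi->interval_exp - SECTOR_SHIFT));

	/* returns the number of bytes added; a short return means failure */
	if (bio_integrity_add_page(bio, md_page, len, md_off) != len)
		return -ENOMEM;

	return 0;
}

The patch generalizes this to a metadata SG list by walking it with an
sg_mapping_iter and adding each mapped chunk until the payload is full.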
 drivers/nvme/target/io-cmd-bdev.c | 87 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 85 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 2e9e309..018acd7 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -164,6 +164,61 @@ static void nvmet_bio_done(struct bio *bio)
 	bio_put(bio);
 }
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	struct blk_integrity *bi;
+	struct bio_integrity_payload *bip;
+	struct block_device *bdev = req->ns->bdev;
+	int rc;
+	size_t resid, len;
+
+	bi = bdev_get_integrity(bdev);
+	if (unlikely(!bi)) {
+		pr_err("Unable to locate bio_integrity\n");
+		return -ENODEV;
+	}
+
+	bip = bio_integrity_alloc(bio, GFP_NOIO,
+		min_t(unsigned int, req->md_sg_cnt, BIO_MAX_PAGES));
+	if (IS_ERR(bip)) {
+		pr_err("Unable to allocate bio_integrity_payload\n");
+		return PTR_ERR(bip);
+	}
+
+	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+	/* virtual start sector must be in integrity interval units */
+	bip_set_seed(bip, bio->bi_iter.bi_sector >>
+		     (bi->interval_exp - SECTOR_SHIFT));
+
+	resid = bip->bip_iter.bi_size;
+	while (resid > 0 && sg_miter_next(miter)) {
+		len = min_t(size_t, miter->length, resid);
+		rc = bio_integrity_add_page(bio, miter->page, len,
+					    offset_in_page(miter->addr));
+		if (unlikely(rc != len)) {
+			pr_err("bio_integrity_add_page() failed; %d\n", rc);
+			sg_miter_stop(miter);
+			return -ENOMEM;
+		}
+
+		resid -= len;
+		if (len < miter->length)
+			miter->consumed -= miter->length - len;
+	}
+	sg_miter_stop(miter);
+
+	return 0;
+}
+#else
+static int nvmet_bdev_alloc_bip(struct nvmet_req *req, struct bio *bio,
+				struct sg_mapping_iter *miter)
+{
+	return 0;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 {
 	int sg_cnt = req->sg_cnt;
@@ -171,9 +226,11 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	struct scatterlist *sg;
 	struct blk_plug plug;
 	sector_t sector;
-	int op, i;
+	int op, i, rc;
+	struct sg_mapping_iter prot_miter;
 
-	if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
+	if (!nvmet_check_transfer_len(req,
+				      nvmet_rw_data_len(req) + req->md_len))
 		return;
 
 	if (!req->sg_cnt) {
@@ -208,11 +265,25 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 	bio->bi_opf = op;
 
 	blk_start_plug(&plug);
+	if (req->use_md)
+		sg_miter_start(&prot_miter, req->md_sg, req->md_sg_cnt,
+			       op == REQ_OP_READ ? SG_MITER_FROM_SG :
+			       SG_MITER_TO_SG);
+
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
 			struct bio *prev = bio;
 
+			if (req->use_md) {
+				rc = nvmet_bdev_alloc_bip(req, bio,
+							  &prot_miter);
+				if (unlikely(rc)) {
+					bio_io_error(bio);
+					return;
+				}
+			}
+
 			bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
 			bio_set_dev(bio, req->ns->bdev);
 			bio->bi_iter.bi_sector = sector;
@@ -226,6 +297,14 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
 		sg_cnt--;
 	}
 
+	if (req->use_md) {
+		rc = nvmet_bdev_alloc_bip(req, bio, &prot_miter);
+		if (unlikely(rc)) {
+			bio_io_error(bio);
+			return;
+		}
+	}
+
 	submit_bio(bio);
 	blk_finish_plug(&plug);
 }
@@ -353,6 +432,10 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_read:
 	case nvme_cmd_write:
 		req->execute = nvmet_bdev_execute_rw;
+		if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
+			req->use_md = true;
+			req->md_len = nvmet_rw_md_len(req);
+		}
 		return 0;
 	case nvme_cmd_flush:
 		req->execute = nvmet_bdev_execute_flush;
-- 
1.8.3.1