From: Damien Le Moal <damien.lemoal@xxxxxxx>

Support REQ_OP_ZONE_APPEND requests for zone mode null_blk devices.
Use the internally tracked zone write pointer position as the actual
write position, which is returned using the command request __sector
field in the case of an mq device and using the command BIO sector in
the case of a BIO device.

Since the write position is used for data copy in the case of a memory
backed device, reverse the order in which null_handle_zoned() and
null_handle_memory_backed() are called to ensure that
null_handle_memory_backed() sees the correct write position for
REQ_OP_ZONE_APPEND operations.

Signed-off-by: Damien Le Moal <damien.lemoal@xxxxxxx>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@xxxxxxx>
---
 drivers/block/null_blk_main.c  |  9 +++++---
 drivers/block/null_blk_zoned.c | 38 +++++++++++++++++++++++++++-------
 2 files changed, 37 insertions(+), 10 deletions(-)

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 3e45e3640c12..5492f1e49eee 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1300,12 +1300,15 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 		goto out;
 	}
 
+	if (dev->zoned) {
+		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
+		if (cmd->error != BLK_STS_OK)
+			goto out;
+	}
+
 	if (dev->memory_backed)
 		cmd->error = null_handle_memory_backed(cmd, op);
 
-	if (!cmd->error && dev->zoned)
-		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
-
 out:
 	nullb_complete_cmd(cmd);
 	return BLK_STS_OK;
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 8259f3212a28..f20be7b91b9f 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -67,13 +67,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 
 int null_register_zoned_dev(struct nullb *nullb)
 {
+	struct nullb_device *dev = nullb->dev;
 	struct request_queue *q = nullb->q;
 
-	if (queue_is_mq(q))
-		return blk_revalidate_disk_zones(nullb->disk);
+	if (queue_is_mq(q)) {
+		int ret = blk_revalidate_disk_zones(nullb->disk);
+
+		if (ret)
+			return ret;
+	} else {
+		blk_queue_chunk_sectors(q, dev->zone_size_sects);
+		q->nr_zones = blkdev_nr_zones(nullb->disk);
+	}
 
-	blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects);
-	q->nr_zones = blkdev_nr_zones(nullb->disk);
+	blk_queue_max_zone_append_sectors(q,
+			min_t(sector_t, q->limits.max_hw_sectors,
+			      dev->zone_size_sects));
 
 	return 0;
 }
@@ -133,7 +142,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
 }
 
 static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
-				    unsigned int nr_sectors)
+				    unsigned int nr_sectors, bool append)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	unsigned int zno = null_zone_no(dev, sector);
@@ -148,7 +157,20 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	case BLK_ZONE_COND_IMP_OPEN:
 	case BLK_ZONE_COND_EXP_OPEN:
 	case BLK_ZONE_COND_CLOSED:
-		/* Writes must be at the write pointer position */
+		/*
+		 * Regular writes must be at the write pointer position.
+		 * Zone append writes are automatically issued at the write
+		 * pointer and the position returned using the request or BIO
+		 * sector.
+		 */
+		if (append) {
+			sector = zone->wp;
+			if (cmd->bio)
+				cmd->bio->bi_iter.bi_sector = sector;
+			else
+				cmd->rq->__sector = sector;
+		}
+
 		if (sector != zone->wp)
 			return BLK_STS_IOERR;
 
@@ -228,7 +250,9 @@ blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
 {
 	switch (op) {
 	case REQ_OP_WRITE:
-		return null_zone_write(cmd, sector, nr_sectors);
+		return null_zone_write(cmd, sector, nr_sectors, false);
+	case REQ_OP_ZONE_APPEND:
+		return null_zone_write(cmd, sector, nr_sectors, true);
 	case REQ_OP_ZONE_RESET:
 	case REQ_OP_ZONE_RESET_ALL:
 	case REQ_OP_ZONE_OPEN:
-- 
2.24.1
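For context, the write position that this patch records in the request
__sector field or the BIO sector is what a zone append submitter reads
back on completion. Below is a minimal, illustrative sketch of an
in-kernel user, not part of the patch: the example_zone_append() helper
name and its arguments are made up, error handling is omitted, and the
bio_alloc()/submit_bio_wait() calls assume the bio APIs as of the kernel
series this patch was posted against.

/*
 * Hypothetical helper, for illustration only: issue a zone append
 * write to the zone starting at zone_start_sector and return the
 * sector at which the data was actually written.
 */
static sector_t example_zone_append(struct block_device *bdev,
				    sector_t zone_start_sector,
				    struct page *page, unsigned int len)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	sector_t written;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC;
	/* A zone append targets a zone, identified by its start sector. */
	bio->bi_iter.bi_sector = zone_start_sector;
	bio_add_page(bio, page, len, 0);

	submit_bio_wait(bio);

	/*
	 * On completion, the block layer reports the effective write
	 * position back in bio->bi_iter.bi_sector.
	 */
	written = bio->bi_iter.bi_sector;
	bio_put(bio);

	return written;
}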