When submitting a REQ_OP_WRITE | REQ_FUA request to a zoned storage
device, that request must be passed to the I/O scheduler (mq-deadline)
to ensure that the write happens at the write pointer. It has been
verified that this patch prevents the write pointer violations that
happen sporadically when f2fs uses a zoned storage device.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/blk-mq.c | 31 ++++---------------------------
 1 file changed, 4 insertions(+), 27 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5ee62b95f3e5..530aad95cc33 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2546,16 +2546,14 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
 }
 
-static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
+static void blk_mq_plug_issue(struct blk_plug *plug, bool from_schedule)
 {
 	struct blk_mq_hw_ctx *hctx = NULL;
 	struct request *rq;
 	int queued = 0;
-	int errors = 0;
 
 	while ((rq = rq_list_pop(&plug->mq_list))) {
 		bool last = rq_list_empty(plug->mq_list);
-		blk_status_t ret;
 
 		if (hctx != rq->mq_hctx) {
 			if (hctx)
@@ -2563,29 +2561,9 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
 			hctx = rq->mq_hctx;
 		}
 
-		ret = blk_mq_request_issue_directly(rq, last);
-		switch (ret) {
-		case BLK_STS_OK:
-			queued++;
-			break;
-		case BLK_STS_RESOURCE:
-		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false, last);
-			blk_mq_commit_rqs(hctx, &queued, from_schedule);
-			return;
-		default:
-			blk_mq_end_request(rq, ret);
-			errors++;
-			break;
-		}
+		blk_mq_sched_insert_request(rq, /*at_head=*/false,
+					    /*run_queue=*/last, /*async=*/false);
 	}
-
-	/*
-	 * If we didn't flush the entire list, we could have told the driver
-	 * there was more coming, but that turned out to be a lie.
-	 */
-	if (errors)
-		blk_mq_commit_rqs(hctx, &queued, from_schedule);
 }
 
 static void __blk_mq_flush_plug_list(struct request_queue *q,
@@ -2655,8 +2633,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 	}
 
-	blk_mq_run_dispatch_ops(q,
-			blk_mq_plug_issue_direct(plug, false));
+	blk_mq_run_dispatch_ops(q, blk_mq_plug_issue(plug, false));
 	if (rq_list_empty(plug->mq_list))
 		return;
 }
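
For reviewers who want to see the write pointer rule from outside the
kernel: below is a minimal userspace sketch, not part of this patch
(the device path and the choice of zone 0 are made-up examples), that
queries a zone's write pointer via the BLKREPORTZONE ioctl and issues
a direct write at exactly that position. On a sequential-write-required
zone the device rejects a write anywhere other than the write pointer,
which is the failure mode this patch avoids when plugged REQ_FUA writes
would otherwise bypass mq-deadline and reach the device out of order.

/*
 * Userspace illustration of the zoned write pointer rule. Not part of
 * this patch. /dev/nvme0n1 is an example device; on real hardware the
 * first zone may be conventional rather than sequential-write-required.
 * Build with: gcc -O2 -o wp-demo wp-demo.c
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/nvme0n1", O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the kernel for the first zone, including its write pointer. */
	struct blk_zone_report *rep =
		calloc(1, sizeof(*rep) + sizeof(struct blk_zone));
	if (!rep)
		return 1;
	rep->sector = 0;
	rep->nr_zones = 1;
	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		return 1;
	}

	/*
	 * rep->zones[0].wp is a 512-byte sector offset. A sequential zone
	 * only accepts writes that start exactly at this offset; a write
	 * anywhere else fails. That is the "write pointer violation" the
	 * patch description refers to.
	 */
	void *buf;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0, 4096);
	if (pwrite(fd, buf, 4096, rep->zones[0].wp << 9) != 4096)
		perror("pwrite at write pointer");

	free(buf);
	free(rep);
	close(fd);
	return 0;
}

Note that running this twice only works because the second run re-reads
the advanced write pointer; hard-coding the offset observed during the
first run would reproduce exactly the kind of misaligned write error
described above.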