Let deadline_next_request() only consider the first zoned write per zone.
This patch fixes a race condition between deadline_next_request() and the
completion of zoned writes.

Cc: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 block/mq-deadline.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 3122c471f473..32a2cc013ed3 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -302,6 +302,7 @@ static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
 	return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
 /*
  * Skip all write requests that are sequential from @rq, even if we cross
  * a zone boundary.
@@ -318,6 +319,7 @@ static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
 
 	return rq;
 }
+#endif
 
 /*
  * For the specified data direction, return the next request to
@@ -386,9 +388,25 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	 */
 	spin_lock_irqsave(&dd->zone_lock, flags);
 	while (rq) {
+		unsigned int zno __maybe_unused;
+
 		if (blk_req_can_dispatch_to_zone(rq))
 			break;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+		zno = blk_rq_zone_no(rq);
+		rq = deadline_skip_seq_writes(dd, rq);
+
+		/*
+		 * Skip all other write requests for the zone with zone number
+		 * 'zno'. This prevents this function from selecting a zoned
+		 * write that is not the first write for a given zone.
+		 */
+		while (rq && blk_rq_zone_no(rq) == zno &&
+		       blk_rq_is_seq_zoned_write(rq))
+			rq = deadline_latter_request(rq);
+#endif
 	}
 	spin_unlock_irqrestore(&dd->zone_lock, flags);
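
For illustration, a minimal user-space sketch of the dispatch-side skip
logic follows. All names in it (struct req, can_dispatch(), pick_next(),
the zone_locked array) are simplified stand-ins invented for this example,
not kernel APIs; the real code walks the dispatch tree via
deadline_latter_request() under dd->zone_lock and tests dispatchability
with blk_req_can_dispatch_to_zone().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct req {
	unsigned long long pos;		/* start sector */
	unsigned int sectors;		/* request size in sectors */
	unsigned int zone;		/* zone number */
	bool seq_write;			/* sequential zoned write? */
	struct req *next;		/* next request in sector order */
};

/* Stand-in for blk_req_can_dispatch_to_zone(). */
static bool can_dispatch(const struct req *rq, const bool *zone_locked)
{
	return !rq->seq_write || !zone_locked[rq->zone];
}

/*
 * Mirror of the patched loop: if @rq cannot be dispatched, first skip the
 * run of writes that are sequential from @rq (the equivalent of
 * deadline_skip_seq_writes()), then skip any further writes for the same
 * zone so that only the first write per zone can ever be selected.
 */
static struct req *pick_next(struct req *rq, const bool *zone_locked)
{
	while (rq) {
		unsigned int zno;

		if (can_dispatch(rq, zone_locked))
			break;

		zno = rq->zone;
		/* Skip writes that are sequential from @rq. */
		while (rq->next &&
		       rq->next->pos == rq->pos + rq->sectors)
			rq = rq->next;
		rq = rq->next;
		/* Skip all remaining writes for zone 'zno'. */
		while (rq && rq->zone == zno && rq->seq_write)
			rq = rq->next;
	}
	return rq;
}

int main(void)
{
	/* Zone 0 is write-locked; zone 1 is not. */
	bool zone_locked[2] = { true, false };
	struct req r2 = { 128, 8, 1, true, NULL };
	struct req r1 = { 8, 8, 0, true, &r2 };	/* sequential after r0 */
	struct req r0 = { 0, 8, 0, true, &r1 };

	struct req *rq = pick_next(&r0, zone_locked);

	/* Expect the zone 1 write at sector 128 to be selected. */
	printf("selected request at sector %llu\n", rq ? rq->pos : ~0ULL);
	return 0;
}

With zone 0 write-locked, the two sequential zone 0 writes are skipped as
a unit and the zone 1 write is selected. A zone 0 write other than the
first one can thus never be picked, even if a concurrent completion
unlocks zone 0 halfway through the scan, which is the race the patch
addresses.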