On 4/8/23 08:58, Bart Van Assche wrote:
> Requeue requests instead of sending these to the dispatch list if a CPU
> is unplugged to prevent reordering of zoned writes.
>
> Cc: Christoph Hellwig <hch@xxxxxx>
> Cc: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
> Cc: Ming Lei <ming.lei@xxxxxxxxxx>
> Cc: Mike Snitzer <snitzer@xxxxxxxxxx>
> Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>

Looks OK.

Reviewed-by: Damien Le Moal <dlemoal@xxxxxxxxxx>

> ---
>  block/blk-mq.c | 14 +++++++++++---
>  1 file changed, 11 insertions(+), 3 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 57315395434b..77fdaed4e074 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -3495,9 +3495,17 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
>  	if (list_empty(&tmp))
>  		return 0;
>
> -	spin_lock(&hctx->lock);
> -	list_splice_tail_init(&tmp, &hctx->dispatch);
> -	spin_unlock(&hctx->lock);
> +	if (hctx->queue->elevator) {
> +		struct request *rq, *next;
> +
> +		list_for_each_entry_safe(rq, next, &tmp, queuelist)
> +			blk_mq_requeue_request(rq, false);
> +		blk_mq_kick_requeue_list(hctx->queue);
> +	} else {
> +		spin_lock(&hctx->lock);
> +		list_splice_tail_init(&tmp, &hctx->dispatch);
> +		spin_unlock(&hctx->lock);
> +	}
>
> 	blk_mq_run_hw_queue(hctx, true);
> 	return 0;
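
A side note for readers following the loop above: the patch uses list_for_each_entry_safe() rather than list_for_each_entry() because blk_mq_requeue_request() re-links rq->queuelist while the list is being walked, so the next element must be cached before the body runs. Below is a minimal, hypothetical user-space sketch of that intrusive-list pattern; the list_head helpers and the toy struct request are simplified stand-ins written for illustration, not the actual <linux/list.h> or blk-mq code.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the kernel's doubly linked list_head. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
/* Caches the next entry in "n" so the body may re-link "pos". */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
	     n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Toy stand-in for struct request; only the list linkage matters here. */
struct request {
	int tag;
	struct list_head queuelist;
};

int main(void)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct list_head requeue_list = LIST_HEAD_INIT(requeue_list);
	struct request rqs[3] = { { .tag = 0 }, { .tag = 1 }, { .tag = 2 } };
	struct request *rq, *next;
	int i;

	for (i = 0; i < 3; i++)
		list_add_tail(&rqs[i].queuelist, &tmp);

	/*
	 * Mirrors the shape of the patched loop: every request is moved
	 * off "tmp" in FIFO order, re-linking rq->queuelist in the body,
	 * which is exactly why the _safe variant is required.
	 */
	list_for_each_entry_safe(rq, next, &tmp, queuelist) {
		list_del(&rq->queuelist);
		list_add_tail(&rq->queuelist, &requeue_list);
	}

	list_for_each_entry_safe(rq, next, &requeue_list, queuelist)
		printf("requeued tag %d\n", rq->tag);

	return 0;
}

Because the requeue list is appended to in iteration order, submission order is preserved, which is the property the patch needs for zoned writes.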