On Fri, Apr 24, 2020 at 03:47:28PM +0200, Hannes Reinecke wrote:
> On 4/24/20 12:23 PM, Ming Lei wrote:
> > Flush requests aren't the same as normal FS requests:
> > 
> > 1) one dedicated per-hctx flush rq is pre-allocated for sending flush requests
> > 
> > 2) flush requests are issued to hardware via one machinery so that flush merge
> > can be applied
> > 
> > We can't simply re-submit flush rqs via blk_steal_bios(), so add
> > blk_end_flush_machinery to collect flush requests which need to
> > be resubmitted:
> > 
> > - if one flush command without DATA is enough, send one flush and complete this
> > kind of request
> > 
> > - otherwise, add the request to a list and let the caller re-submit it.
> > 
> > Cc: John Garry <john.garry@xxxxxxxxxx>
> > Cc: Bart Van Assche <bvanassche@xxxxxxx>
> > Cc: Hannes Reinecke <hare@xxxxxxxx>
> > Cc: Christoph Hellwig <hch@xxxxxx>
> > Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> > Reviewed-by: Christoph Hellwig <hch@xxxxxx>
> > Signed-off-by: Ming Lei <ming.lei@xxxxxxxxxx>
> > ---
> >  block/blk-flush.c | 123 +++++++++++++++++++++++++++++++++++++++++++---
> >  block/blk.h       |   4 ++
> >  2 files changed, 120 insertions(+), 7 deletions(-)
> > 
> > diff --git a/block/blk-flush.c b/block/blk-flush.c
> > index 977edf95d711..745d878697ed 100644
> > --- a/block/blk-flush.c
> > +++ b/block/blk-flush.c
> > @@ -170,10 +170,11 @@ static void blk_flush_complete_seq(struct request *rq,
> >  	unsigned int cmd_flags;
> >  
> >  	BUG_ON(rq->flush.seq & seq);
> > -	rq->flush.seq |= seq;
> > +	if (!error)
> > +		rq->flush.seq |= seq;
> >  	cmd_flags = rq->cmd_flags;
> >  
> > -	if (likely(!error))
> > +	if (likely(!error && !fq->flush_queue_terminating))
> >  		seq = blk_flush_cur_seq(rq);
> >  	else
> >  		seq = REQ_FSEQ_DONE;
> > @@ -200,9 +201,15 @@ static void blk_flush_complete_seq(struct request *rq,
> >  		 * normal completion and end it.
> >  		 */
> >  		BUG_ON(!list_empty(&rq->queuelist));
> > -		list_del_init(&rq->flush.list);
> > -		blk_flush_restore_request(rq);
> > -		blk_mq_end_request(rq, error);
> > +
> > +		/* Terminating code will end the request from flush queue */
> > +		if (likely(!fq->flush_queue_terminating)) {
> > +			list_del_init(&rq->flush.list);
> > +			blk_flush_restore_request(rq);
> > +			blk_mq_end_request(rq, error);
> > +		} else {
> > +			list_move_tail(&rq->flush.list, pending);
> > +		}
> >  		break;
> >  
> >  	default:
> > @@ -279,7 +286,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
> >  	struct request *flush_rq = fq->flush_rq;
> >  
> >  	/* C1 described at the top of this file */
> > -	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
> > +	if (fq->flush_pending_idx != fq->flush_running_idx ||
> > +			list_empty(pending) || fq->flush_queue_terminating)
> >  		return;
> >  
> >  	/* C2 and C3
> > @@ -331,7 +339,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
> >  	struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
> >  
> >  	if (q->elevator) {
> > -		WARN_ON(rq->tag < 0);
> > +		WARN_ON(rq->tag < 0 && !fq->flush_queue_terminating);
> >  		blk_mq_put_driver_tag(rq);
> >  	}
> >  
> > @@ -503,3 +511,104 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
> >  	kfree(fq->flush_rq);
> >  	kfree(fq);
> >  }
> > +
> > +static void __blk_end_queued_flush(struct blk_flush_queue *fq,
> > +		unsigned int queue_idx, struct list_head *resubmit_list,
> > +		struct list_head *flush_list)
> > +{
> > +	struct list_head *queue = &fq->flush_queue[queue_idx];
> > +	struct request *rq, *nxt;
> > +
> > +	list_for_each_entry_safe(rq, nxt, queue, flush.list) {
> > +		unsigned int seq = blk_flush_cur_seq(rq);
> > +
> > +		list_del_init(&rq->flush.list);
> > +		blk_flush_restore_request(rq);
> > +		if (!blk_rq_sectors(rq) || seq == REQ_FSEQ_POSTFLUSH)
> > +			list_add_tail(&rq->queuelist, flush_list);
> > +		else
> > +			list_add_tail(&rq->queuelist, resubmit_list);
> > +	}
> > +}
> > +
> > +static void blk_end_queued_flush(struct blk_flush_queue *fq,
> > +		struct list_head *resubmit_list, struct list_head *flush_list)
> > +{
> > +	unsigned long flags;
> > +
> > +	spin_lock_irqsave(&fq->mq_flush_lock, flags);
> > +	__blk_end_queued_flush(fq, 0, resubmit_list, flush_list);
> > +	__blk_end_queued_flush(fq, 1, resubmit_list, flush_list);
> > +	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
> > +}
> > +
> > +/* complete requests which just require one flush command */
> > +static void blk_complete_flush_requests(struct blk_flush_queue *fq,
> > +		struct list_head *flush_list)
> > +{
> > +	struct block_device *bdev;
> > +	struct request *rq;
> > +	int error = -ENXIO;
> > +
> > +	if (list_empty(flush_list))
> > +		return;
> > +
> > +	rq = list_first_entry(flush_list, struct request, queuelist);
> > +
> > +	/* Send flush via one active hctx so we can move on */
> > +	bdev = bdget_disk(rq->rq_disk, 0);
> > +	if (bdev) {
> > +		error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
> > +		bdput(bdev);
> > +	}
> > +
> > +	while (!list_empty(flush_list)) {
> > +		rq = list_first_entry(flush_list, struct request, queuelist);
> > +		list_del_init(&rq->queuelist);
> > +		blk_mq_end_request(rq, error);
> > +	}
> > +}
> > +
> I must admit I'm getting nervous when one mixes direct request manipulations
> with high-level 'blkdev_XXX' calls.
> Can't we just requeue everything, including the flush itself, and let the
> requeue algorithm pick a new hctx?
No, the biggest problem is that rq->mq_hctx has been bound to the hctx
(which is now becoming inactive) since request allocation, so we can't
simply requeue these requests.

What we need here is just one flush command, and blkdev_issue_flush()
does exactly what we want.

Thanks,
Ming
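
A minimal sketch of what that single flush command boils down to, assuming the
v5.6-era bio API (the helper name issue_one_flush() is made up for illustration;
from memory, blkdev_issue_flush() in this kernel generation wraps essentially the
same pattern): a fresh zero-data PREFLUSH bio is sent down the normal submission
path, so request allocation and hctx selection happen on a currently active hctx
instead of reusing the stale rq->mq_hctx binding.

/*
 * Illustrative sketch only, not the in-tree implementation: issue a single
 * flush command by allocating a zero-data PREFLUSH bio and waiting for it.
 * submit_bio_wait() goes through the regular submission path, which picks
 * an active hctx, sidestepping the hctx that is being taken offline.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static int issue_one_flush(struct block_device *bdev)
{
        struct bio *bio;
        int ret;

        if (!bdev_get_queue(bdev))
                return -ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);         /* no data pages, flush only */
        if (!bio)
                return -ENOMEM;
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);             /* hctx is picked internally */
        bio_put(bio);
        return ret;
}

In the patch above, every flush request parked on the dying hctx is then ended
with whatever status that one flush returned, while requests that still carry
data go onto the resubmit list for the caller to re-submit.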