Callers may use this counter to optimize flushes: flush_tag is bumped
when a flush request is kicked and again when it completes, so a caller
can tell whether a cache flush has run since a given point and skip
issuing a redundant one.

Signed-off-by: Dmitry Monakhov <dmonakhov@xxxxxxxxxx>
---
 block/blk-core.c       |    1 +
 block/blk-flush.c      |    3 ++-
 include/linux/blkdev.h |    1 +
 3 files changed, 4 insertions(+), 1 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 074b758..afb5a4b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -537,6 +537,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
+	atomic_set(&q->flush_tag, 0);
 	/*
 	 * Drain all requests queued before DYING marking. Set DEAD flag to
 	 * prevent that q->request_fn() gets invoked after draining finished.
diff --git a/block/blk-flush.c b/block/blk-flush.c
index cc2b827..b1adc75 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -203,7 +203,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	/* account completion of the flush request */
 	q->flush_running_idx ^= 1;
 	elv_completed_request(q, flush_rq);
-
+	atomic_inc(&q->flush_tag);
 	/* and push the waiting requests to the next stage */
 	list_for_each_entry_safe(rq, n, running, flush.list) {
 		unsigned int seq = blk_flush_cur_seq(rq);
@@ -268,6 +268,7 @@ static bool blk_kick_flush(struct request_queue *q)
 	q->flush_rq.end_io = flush_end_io;
 
 	q->flush_pending_idx ^= 1;
+	atomic_inc(&q->flush_tag);
 	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
 	return true;
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 78feda9..e079fbd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -416,6 +416,7 @@ struct request_queue {
 	unsigned int		flush_queue_delayed:1;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
+	atomic_t		flush_tag;
 	unsigned long		flush_pending_since;
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
-- 
1.7.1
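
For readers wondering what "optimize flushes" might look like on the
caller side, here is a minimal sketch of one possible usage pattern. It
is not taken from this series: maybe_issue_flush() and the
delta-of-two heuristic are illustrative assumptions, while
atomic_read(), bdev_get_queue() and the 3.x-era blkdev_issue_flush()
prototype are existing kernel interfaces.

#include <linux/blkdev.h>
#include <linux/atomic.h>

/*
 * Illustrative only: record q->flush_tag right after our data write
 * completes, then call this at fsync time to decide whether an
 * explicit cache flush is still needed.
 */
static int maybe_issue_flush(struct block_device *bdev,
			     unsigned int tag_before)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/*
	 * flush_tag advances once when a flush is kicked and once when
	 * it completes, so a delta of at least two since our write
	 * finished hints that a full flush cycle already covered our
	 * data.  This is only a heuristic (the two increments may
	 * belong to different flush requests), so fall back to an
	 * explicit flush whenever in doubt.
	 */
	if ((unsigned int)atomic_read(&q->flush_tag) - tag_before >= 2)
		return 0;

	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}

A real user would also need to settle the counter's exact ordering
guarantees (which increment belongs to which flush), which is why the
sketch only ever skips the flush, never forces one.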