Mappings could be processed in descending logical block order,
particularly if buffered IO is used.  This could adversely affect the
latency of IO processing.

Fix this by adding mappings to the end of the 'prepared_mappings' and
'prepared_discards' lists.

Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
Acked-by: Joe Thornber <ejt@xxxxxxxxxx>
---
 drivers/md/dm-thin.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index b721d7a..850e7cd 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -535,7 +535,7 @@ static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
 	struct pool *pool = m->tc->pool;
 
 	if (m->quiesced && m->prepared) {
-		list_add(&m->list, &pool->prepared_mappings);
+		list_add_tail(&m->list, &pool->prepared_mappings);
 		wake_worker(pool);
 	}
 }
@@ -1062,7 +1062,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 
 		if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 			spin_lock_irqsave(&pool->lock, flags);
-			list_add(&m->list, &pool->prepared_discards);
+			list_add_tail(&m->list, &pool->prepared_discards);
 			spin_unlock_irqrestore(&pool->lock, flags);
 			wake_worker(pool);
 		}
@@ -2923,7 +2923,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	if (!list_empty(&work)) {
 		spin_lock_irqsave(&pool->lock, flags);
 		list_for_each_entry_safe(m, tmp, &work, list)
-			list_add(&m->list, &pool->prepared_discards);
+			list_add_tail(&m->list, &pool->prepared_discards);
 		spin_unlock_irqrestore(&pool->lock, flags);
 		wake_worker(pool);
 	}
-- 
1.8.1.4

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/dm-devel
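
For anyone reading along: the behavioural difference the patch relies on is that
the kernel's list_add() inserts a node just after the list head (so the most
recently prepared mapping is walked first, effectively LIFO), while
list_add_tail() inserts just before the head (FIFO, preserving submission order).
The standalone userspace sketch below re-implements only those two helpers to
demonstrate the ordering; the simplified list_head, struct mapping and dump()
here are illustrative stand-ins, not the real dm-thin structures.

/*
 * Standalone sketch (not dm-thin code): shows why head insertion yields
 * LIFO (descending) walk order while tail insertion preserves arrival order.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void init_list_head(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

/* Insert 'entry' between 'prev' and 'next'. */
static void __list_add(struct list_head *entry,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

/* list_add(): insert just after the head -- newest entry is walked first. */
static void list_add(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head, head->next);
}

/* list_add_tail(): insert just before the head -- arrival order preserved. */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

struct mapping {
	int block;			/* stand-in for a logical block number */
	struct list_head list;
};

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void dump(const char *tag, struct list_head *head)
{
	struct list_head *pos;

	printf("%-15s", tag);
	for (pos = head->next; pos != head; pos = pos->next)
		printf(" %d", list_entry(pos, struct mapping, list)->block);
	printf("\n");
}

int main(void)
{
	struct mapping a[3] = { { .block = 1 }, { .block = 2 }, { .block = 3 } };
	struct mapping b[3] = { { .block = 1 }, { .block = 2 }, { .block = 3 } };
	struct list_head head_order, tail_order;
	int i;

	init_list_head(&head_order);
	init_list_head(&tail_order);

	for (i = 0; i < 3; i++) {
		list_add(&a[i].list, &head_order);	/* old behaviour */
		list_add_tail(&b[i].list, &tail_order);	/* patched behaviour */
	}

	dump("list_add:", &head_order);		/* prints: 3 2 1 */
	dump("list_add_tail:", &tail_order);	/* prints: 1 2 3 */
	return 0;
}

With three mappings prepared for blocks 1, 2, 3, the list_add() list is walked
as 3 2 1 (descending) and the list_add_tail() list as 1 2 3, which is exactly
the ordering problem the three one-line hunks above address.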