This is a note to let you know that I've just added the patch titled

    dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown

to the 3.10-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
and it can be found in the queue-3.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 66cb1910df17b38334153462ec8166e48058035f Mon Sep 17 00:00:00 2001
From: Joe Thornber <ejt@xxxxxxxxxx>
Date: Wed, 30 Oct 2013 17:11:58 +0000
Subject: dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown

From: Joe Thornber <ejt@xxxxxxxxxx>

commit 66cb1910df17b38334153462ec8166e48058035f upstream.

The code that was trying to do this was inadequate.  The postsuspend
method (in ioctl context), needs to wait for the worker thread to
acknowledge the request to quiesce.  Otherwise the migration count may
drop to zero temporarily before the worker thread realises we're
quiescing.  In this case the target will be taken down, but the worker
thread may have issued a new migration, which will cause an oops when
it completes.

Signed-off-by: Joe Thornber <ejt@xxxxxxxxxx>
Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/md/dm-cache-target.c |   54 +++++++++++++++++++++++++++++++------------
 1 file changed, 40 insertions(+), 14 deletions(-)

--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -151,6 +151,9 @@ struct cache {
 	atomic_t nr_migrations;
 	wait_queue_head_t migration_wait;
 
+	wait_queue_head_t quiescing_wait;
+	atomic_t quiescing_ack;
+
 	/*
 	 * cache_size entries, dirty if set
 	 */
@@ -742,8 +745,9 @@ static void cell_defer(struct cache *cac
 
 static void cleanup_migration(struct dm_cache_migration *mg)
 {
-	dec_nr_migrations(mg->cache);
+	struct cache *cache = mg->cache;
 	free_migration(mg);
+	dec_nr_migrations(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -1340,34 +1344,51 @@ static void writeback_some_dirty_blocks(
 /*----------------------------------------------------------------
  * Main worker loop
  *--------------------------------------------------------------*/
-static void start_quiescing(struct cache *cache)
+static bool is_quiescing(struct cache *cache)
 {
+	int r;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	cache->quiescing = 1;
+	r = cache->quiescing;
 	spin_unlock_irqrestore(&cache->lock, flags);
+
+	return r;
 }
 
-static void stop_quiescing(struct cache *cache)
+static void ack_quiescing(struct cache *cache)
+{
+	if (is_quiescing(cache)) {
+		atomic_inc(&cache->quiescing_ack);
+		wake_up(&cache->quiescing_wait);
+	}
+}
+
+static void wait_for_quiescing_ack(struct cache *cache)
+{
+	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
+}
+
+static void start_quiescing(struct cache *cache)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	cache->quiescing = 0;
+	cache->quiescing = true;
 	spin_unlock_irqrestore(&cache->lock, flags);
+
+	wait_for_quiescing_ack(cache);
 }
 
-static bool is_quiescing(struct cache *cache)
+static void stop_quiescing(struct cache *cache)
 {
-	int r;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cache->lock, flags);
-	r = cache->quiescing;
+	cache->quiescing = false;
 	spin_unlock_irqrestore(&cache->lock, flags);
 
-	return r;
+	atomic_set(&cache->quiescing_ack, 0);
 }
 
 static void wait_for_migrations(struct cache *cache)
@@ -1414,16 +1435,15 @@ static void do_worker(struct work_struct
 	struct cache *cache = container_of(ws, struct cache, worker);
 
 	do {
-		if (!is_quiescing(cache))
+		if (!is_quiescing(cache)) {
+			writeback_some_dirty_blocks(cache);
+			process_deferred_writethrough_bios(cache);
 			process_deferred_bios(cache);
+		}
 
 		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
 		process_migrations(cache, &cache->completed_migrations, complete_migration);
 
-		writeback_some_dirty_blocks(cache);
-
-		process_deferred_writethrough_bios(cache);
-
 		if (commit_if_needed(cache)) {
 			process_deferred_flush_bios(cache, false);
 
@@ -1436,6 +1456,9 @@ static void do_worker(struct work_struct
 			process_migrations(cache, &cache->need_commit_migrations,
 					   migration_success_post_commit);
 		}
+
+		ack_quiescing(cache);
+
 	} while (more_work(cache));
 }
 
@@ -1998,6 +2021,9 @@ static int cache_create(struct cache_arg
 	atomic_set(&cache->nr_migrations, 0);
 	init_waitqueue_head(&cache->migration_wait);
 
+	init_waitqueue_head(&cache->quiescing_wait);
+	atomic_set(&cache->quiescing_ack, 0);
+
 	r = -ENOMEM;
 	cache->nr_dirty = 0;
 	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
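
The fix boils down to a handshake: the suspend path raises the quiescing
flag and then blocks until the worker thread acknowledges it, and only
after that acknowledgment is it safe to wait for the in-flight migration
count to drain.  The sketch below is a minimal userspace analogy of that
pattern, not part of the patch: pthread mutex/condvar primitives stand in
for the kernel's spinlock and waitqueue, and names such as quiescing_ack
and nr_migrations merely mirror the kernel fields for illustration.

/*
 * Userspace analogy only -- NOT kernel code.  pthread_cond_* stands in for
 * the kernel waitqueue and a mutex for cache->lock; field names mirror
 * dm-cache-target.c purely for readability.  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiescing_wait = PTHREAD_COND_INITIALIZER; /* ~ wait_queue_head_t */
static bool quiescing;             /* ~ cache->quiescing        */
static atomic_int quiescing_ack;   /* ~ cache->quiescing_ack    */
static atomic_int nr_migrations;   /* in-flight background work */

static bool is_quiescing(void)
{
	pthread_mutex_lock(&lock);
	bool r = quiescing;
	pthread_mutex_unlock(&lock);
	return r;
}

/* Worker side: acknowledge that the quiesce request has been seen. */
static void ack_quiescing(void)
{
	if (is_quiescing()) {
		/* The mutex around the signal avoids a lost wakeup here;
		 * wait_event()/wake_up() handle this internally in the kernel. */
		pthread_mutex_lock(&lock);
		atomic_store(&quiescing_ack, 1);
		pthread_cond_broadcast(&quiescing_wait);   /* ~ wake_up() */
		pthread_mutex_unlock(&lock);
	}
}

/* Shutdown side: raise the flag, then block until the worker has acked. */
static void start_quiescing(void)
{
	pthread_mutex_lock(&lock);
	quiescing = true;
	while (!atomic_load(&quiescing_ack))               /* ~ wait_event() */
		pthread_cond_wait(&quiescing_wait, &lock);
	pthread_mutex_unlock(&lock);
	/* Only now is it safe to wait for nr_migrations to drain to zero:
	 * the worker is guaranteed not to issue further migrations. */
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100; i++) {
		if (!is_quiescing()) {
			atomic_fetch_add(&nr_migrations, 1);   /* issue new work */
			usleep(100);                           /* ...perform it  */
			atomic_fetch_sub(&nr_migrations, 1);   /* complete it    */
		}
		ack_quiescing();   /* end of each worker pass, as in do_worker() */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	usleep(1000);
	start_quiescing();
	printf("quiesce acknowledged, in-flight migrations: %d\n",
	       atomic_load(&nr_migrations));
	pthread_join(t, NULL);
	return 0;
}

The worker stops issuing new work as soon as it observes the flag, and
start_quiescing() cannot return before that observation has been
acknowledged -- which is the ordering the patch above enforces.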

Patches currently in stable-queue which might be from ejt@xxxxxxxxxx are

queue-3.10/dm-cache-fix-a-race-condition-between-queuing-new-migrations-and-quiescing-for-a-shutdown.patch
queue-3.10/dm-array-fix-bug-in-growing-array.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html