3.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Mike Snitzer <snitzer@xxxxxxxxxx>

commit d132cc6d9e92424bb9d4fd35f5bd0e55d583f4be upstream.

If the discard block size is larger than the cache block size we will
not properly quiesce IO to a region that is about to be discarded.  This
results in a race between a cache migration where no copy is needed,
and a write to an adjacent cache block that's within the same large
discard block.

Workaround this by limiting the discard_block_size to cache_block_size.
Also limit the max_discard_sectors to cache_block_size.

A more comprehensive fix that introduces range locking support in the
bio_prison and proper quiescing of a discard range that spans multiple
cache blocks is already in development.

Reported-by: Morgan Mears <Morgan.Mears@xxxxxxxxxx>
Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
Acked-by: Joe Thornber <ejt@xxxxxxxxxx>
Acked-by: Heinz Mauelshagen <heinzm@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/md/dm-cache-target.c |   37 +++----------------------------------
 1 file changed, 3 insertions(+), 34 deletions(-)

--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -239,7 +239,7 @@ struct cache {
 	 */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
-	uint32_t discard_block_size; /* a power of 2 times sectors per block */
+	uint32_t discard_block_size;
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
@@ -2171,35 +2171,6 @@ static int create_cache_policy(struct ca
 	return 0;
 }
 
-/*
- * We want the discard block size to be a power of two, at least the size
- * of the cache block size, and have no more than 2^14 discard blocks
- * across the origin.
- */
-#define MAX_DISCARD_BLOCKS (1 << 14)
-
-static bool too_many_discard_blocks(sector_t discard_block_size,
-				    sector_t origin_size)
-{
-	(void) sector_div(origin_size, discard_block_size);
-
-	return origin_size > MAX_DISCARD_BLOCKS;
-}
-
-static sector_t calculate_discard_block_size(sector_t cache_block_size,
-					     sector_t origin_size)
-{
-	sector_t discard_block_size;
-
-	discard_block_size = roundup_pow_of_two(cache_block_size);
-
-	if (origin_size)
-		while (too_many_discard_blocks(discard_block_size, origin_size))
-			discard_block_size *= 2;
-
-	return discard_block_size;
-}
-
 #define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2321,9 +2292,7 @@ static int cache_create(struct cache_arg
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_block_size =
-		calculate_discard_block_size(cache->sectors_per_block,
-					     cache->origin_sectors);
+	cache->discard_block_size = cache->sectors_per_block;
 	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
 	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
@@ -3120,7 +3089,7 @@ static void set_discard_limits(struct ca
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->discard_block_size * 1024;
+	limits->max_discard_sectors = cache->discard_block_size;
 	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 }
 

-- 
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html