Don't stuff the values directly into the queue without any synchronization,
but instead delay applying the queue limits in the caller and let
dm_set_zones_restrictions work on the limits structure.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 drivers/md/dm-table.c | 16 +++++++++-------
 drivers/md/dm-zone.c  | 11 ++++++-----
 drivers/md/dm.h       |  3 ++-
 3 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index e291b78b307b13..6e4ee829005e92 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1981,10 +1981,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_supports_secure_erase(t))
 		limits->max_secure_erase_sectors = 0;
 
-	r = queue_limits_set(q, limits);
-	if (r)
-		return r;
-
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
 		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
@@ -2036,12 +2032,18 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * For a zoned target, setup the zones related queue attributes
 	 * and resources necessary for zone append emulation if necessary.
 	 */
-	if (blk_queue_is_zoned(q)) {
-		r = dm_set_zones_restrictions(t, q);
-		if (r)
+	if (limits->zoned) {
+		r = dm_set_zones_restrictions(t, q, limits);
+		if (r) {
+			queue_limits_cancel_update(q);
 			return r;
+		}
 	}
 
+	r = queue_limits_set(q, limits);
+	if (r)
+		return r;
+
 	dm_update_crypto_profile(q, t);
 
 	/*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 13b4857d98fe5b..719238aa3006ba 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -220,7 +220,8 @@ static bool dm_table_supports_zone_append(struct dm_table *t)
 	return true;
 }
 
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+		struct queue_limits *lim)
 {
 	struct mapped_device *md = t->md;
 	struct gendisk *disk = md->disk;
@@ -236,7 +237,7 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
 	} else {
 		set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
-		blk_queue_max_zone_append_sectors(q, 0);
+		lim->max_zone_append_sectors = 0;
 	}
 
 	if (!get_capacity(md->disk))
@@ -260,9 +261,9 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q)
 	 * a regular device.
 	 */
 	if (nr_conv_zones >= ret) {
-		disk->queue->limits.max_open_zones = 0;
-		disk->queue->limits.max_active_zones = 0;
-		disk->queue->limits.zoned = false;
+		lim->max_open_zones = 0;
+		lim->max_active_zones = 0;
+		lim->zoned = false;
 		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
 		disk->nr_zones = 0;
 		return 0;
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index e0c57f19839b29..53ef8207fe2c15 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -101,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 /*
  * Zoned targets related functions.
  */
-int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
+int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
+		struct queue_limits *lim);
 void dm_zone_endio(struct dm_io *io, struct bio *clone);
 #ifdef CONFIG_BLK_DEV_ZONED
 int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
-- 
2.43.0
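
For readers without the surrounding code handy, here is a condensed sketch of
the caller-side flow in dm_table_set_restrictions() with this patch applied;
it is abbreviated (most limit setup and the tail of the function are elided),
not the full function body:

	int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
				      struct queue_limits *limits)
	{
		int r;

		/* ... fill in *limits from the table; no queue writes yet ... */

		if (limits->zoned) {
			/* works on *limits only, nothing is applied to the queue here */
			r = dm_set_zones_restrictions(t, q, limits);
			if (r) {
				/* drop the pending limits update on failure */
				queue_limits_cancel_update(q);
				return r;
			}
		}

		/* commit all limits to the queue in one synchronized step */
		r = queue_limits_set(q, limits);
		if (r)
			return r;

		/* ... crypto profile, write cache flags, etc. ... */
		return 0;
	}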