On 10/17/24 6:35 PM, Ming Lei wrote:
Recently we got several deadlock report[1][2][3] caused by blk_mq_freeze_queue
and blk_enter_queue().
Turns out the two are just like one rwsem, so model them as rwsem for
supporting lockdep:
1) model blk_mq_freeze_queue() as down_write_trylock()
- it is exclusive lock, so dependency with blk_enter_queue() is covered
- it is trylock because multiple blk_mq_freeze_queue() calls are allowed to run concurrently
2) model blk_enter_queue() as down_read()
- it is shared lock, so concurrent blk_enter_queue() are allowed
- it is read lock, so dependency with blk_mq_freeze_queue() is modeled
- blk_queue_exit() is often called from other contexts (such as IRQ context), and
it can't be annotated as rwsem_release(), so simply do the release annotation in
blk_enter_queue(); this way still covers as many cases as possible
NVMe is the only subsystem which may call blk_mq_freeze_queue() and
blk_mq_unfreeze_queue() from different contexts, so it is the only
exception for the modeling. Add one tagset flag to exclude it from
the lockdep support.
With lockdep support, such issues can be reported immediately, without
having to wait until the real deadlock is triggered.
For example, the following lockdep splat can be triggered in the
scenario described in report [3].
Hi Ming,
Thank you for having reported this issue and for having proposed a
patch. I think the following is missing from the patch description:
(a) An analysis of which code causes the inconsistent nested lock order.
(b) A discussion of potential alternatives.
It seems unavoidable to me that some code freezes request queue(s)
before the limits are updated. Additionally, there is code that freezes
queues first (sd_revalidate_disk()), executes commands and next updates
limits. Hence the inconsistent order of freezing queues and obtaining
limits_lock.
The alternative (entirely untested) solution below has the following
advantages:
* No additional information has to be provided to lockdep about the
locking order.
* No new flags are required (SKIP_FREEZE_LOCKDEP).
* No exceptions are necessary for blk_queue_exit() or for the NVMe
driver.
Thanks,
Bart.
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 83b696ba0cac..50962ca037d7 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -211,6 +211,7 @@ static ssize_t flag_store(struct device *dev, const
char *page, size_t count,
if (err)
return err;
+ blk_mq_freeze_queue(q);
/* note that the flags are inverted vs the values in the sysfs files */
lim = queue_limits_start_update(q);
if (val)
@@ -218,7 +219,6 @@ static ssize_t flag_store(struct device *dev, const
char *page, size_t count,
else
lim.integrity.flags |= flag;
- blk_mq_freeze_queue(q);
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
if (err)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..a3d2613bad1d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -1106,12 +1106,12 @@ cache_type_store(struct device *dev, struct
device_attribute *attr,
virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
+ blk_mq_freeze_queue(disk->queue);
lim = queue_limits_start_update(disk->queue);
if (virtblk_get_cache_mode(vdev))
lim.features |= BLK_FEAT_WRITE_CACHE;
else
lim.features &= ~BLK_FEAT_WRITE_CACHE;
- blk_mq_freeze_queue(disk->queue);
i = queue_limits_commit_update(disk->queue, &lim);
blk_mq_unfreeze_queue(disk->queue);
if (i)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index ca4bc0ac76ad..1f6ab9768ac7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -175,9 +175,9 @@ cache_type_store(struct device *dev, struct
device_attribute *attr,
sdkp->WCE = wce;
sdkp->RCD = rcd;
+ blk_mq_freeze_queue(sdkp->disk->queue);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_set_flush_flag(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
ret = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (ret)
@@ -481,9 +481,9 @@ provisioning_mode_store(struct device *dev, struct
device_attribute *attr,
if (mode < 0)
return -EINVAL;
+ blk_mq_freeze_queue(sdkp->disk->queue);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_discard(sdkp, &lim, mode);
- blk_mq_freeze_queue(sdkp->disk->queue);
err = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
@@ -592,9 +592,9 @@ max_write_same_blocks_store(struct device *dev,
struct device_attribute *attr,
sdkp->max_ws_blocks = max;
}
+ blk_mq_freeze_queue(sdkp->disk->queue);
lim = queue_limits_start_update(sdkp->disk->queue);
sd_config_write_same(sdkp, &lim);
- blk_mq_freeze_queue(sdkp->disk->queue);
err = queue_limits_commit_update(sdkp->disk->queue, &lim);
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
@@ -3700,7 +3700,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
sector_t old_capacity = sdkp->capacity;
- struct queue_limits lim;
+ struct queue_limits lim = {};
unsigned char *buffer;
unsigned int dev_max;
int err;
@@ -3724,8 +3724,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_spinup_disk(sdkp);
- lim = queue_limits_start_update(sdkp->disk->queue);
-
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
@@ -3804,7 +3802,36 @@ static int sd_revalidate_disk(struct gendisk *disk)
kfree(buffer);
blk_mq_freeze_queue(sdkp->disk->queue);
- err = queue_limits_commit_update(sdkp->disk->queue, &lim);
+ {
+ struct queue_limits lim2 =
+ queue_limits_start_update(sdkp->disk->queue);
+ /* Keep the lim2 member assignments below in alphabetical order. */
+ lim2.alignment_offset = lim.alignment_offset;
+ lim2.atomic_write_hw_boundary = lim.atomic_write_hw_boundary;
+ lim2.atomic_write_hw_max = lim.atomic_write_hw_max;
+ lim2.atomic_write_hw_unit_max = lim.atomic_write_hw_unit_max;
+ lim2.atomic_write_hw_unit_min = lim.atomic_write_hw_unit_min;
+ lim2.chunk_sectors = lim.chunk_sectors;
+ lim2.discard_alignment = lim.discard_alignment;
+ lim2.discard_granularity = lim.discard_granularity;
+ lim2.dma_alignment = lim.dma_alignment;
+ lim2.features |= lim.features;
+ lim2.integrity = lim.integrity;
+ lim2.logical_block_size = lim.logical_block_size;
+ lim2.max_active_zones = lim.max_active_zones;
+ lim2.max_hw_discard_sectors = lim.max_hw_discard_sectors;
+ lim2.max_hw_sectors = lim.max_hw_sectors;
+ lim2.max_integrity_segments = lim.max_integrity_segments;
+ lim2.max_open_zones = lim.max_open_zones;
+ lim2.max_segment_size = lim.max_segment_size;
+ lim2.max_segments = lim.max_segments;
+ lim2.max_write_zeroes_sectors = lim.max_write_zeroes_sectors;
+ lim2.max_zone_append_sectors = lim.max_zone_append_sectors;
+ lim2.physical_block_size = lim.physical_block_size;
+ lim2.virt_boundary_mask = lim.virt_boundary_mask;
+ lim2.zone_write_granularity = lim.zone_write_granularity;
+ err = queue_limits_commit_update(sdkp->disk->queue, &lim2);
+ }
blk_mq_unfreeze_queue(sdkp->disk->queue);
if (err)
return err;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 198bec87bb8e..e9c1673ce6bb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -795,9 +795,9 @@ static int get_sectorsize(struct scsi_cd *cd)
set_capacity(cd->disk, cd->capacity);
}
+ blk_mq_freeze_queue(q);
lim = queue_limits_start_update(q);
lim.logical_block_size = sector_size;
- blk_mq_freeze_queue(q);
err = queue_limits_commit_update(q, &lim);
blk_mq_unfreeze_queue(q);
return err;