__disk_unblock_events() calls queue_delayed_work() with a '0' delay while
holding ev->lock. The work item may then be executed immediately and
deadlock in disk_check_events(), which waits for the same lock to be
released. Convert the 'block' counter into an atomic variable so that the
workqueue functions no longer need to be scheduled under the spinlock.

Note: the unblock path must only schedule the work when the count drops
to exactly zero; returning early for a count of zero would leave event
polling permanently disabled. Also, __disk_unblock_events() never takes
block_mutex, so it must not unlock it.

Signed-off-by: Hannes Reinecke <hare@xxxxxxx>
---
 block/genhd.c | 36 +++++++++++++-----------------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/block/genhd.c b/block/genhd.c
index 9f8cb7beaad1..07e70f0c9c25 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1379,7 +1379,7 @@ struct disk_events {
 	spinlock_t		lock;
 
 	struct mutex		block_mutex;	/* protects blocking */
-	int			block;		/* event blocking depth */
+	atomic_t		block;		/* event blocking depth */
 	unsigned int		pending;	/* events already sent out */
 	unsigned int		clearing;	/* events being cleared */
 
@@ -1439,8 +1439,6 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
 void disk_block_events(struct gendisk *disk)
 {
 	struct disk_events *ev = disk->ev;
-	unsigned long flags;
-	bool cancel;
 
 	if (!ev)
 		return;
@@ -1451,11 +1449,7 @@ void disk_block_events(struct gendisk *disk)
 	 */
 	mutex_lock(&ev->block_mutex);
 
-	spin_lock_irqsave(&ev->lock, flags);
-	cancel = !ev->block++;
-	spin_unlock_irqrestore(&ev->lock, flags);
-
-	if (cancel)
+	if (atomic_inc_return(&ev->block) == 1)
 		cancel_delayed_work_sync(&disk->ev->dwork);
 
 	mutex_unlock(&ev->block_mutex);
@@ -1467,23 +1461,19 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	unsigned long intv;
 	unsigned long flags;
+	int block;
 
+	block = atomic_dec_return(&ev->block);
+	if (WARN_ON_ONCE(block < 0) || block > 0)
+		return;
 	spin_lock_irqsave(&ev->lock, flags);
-
-	if (WARN_ON_ONCE(ev->block <= 0))
-		goto out_unlock;
-
-	if (--ev->block)
-		goto out_unlock;
-
 	intv = disk_events_poll_jiffies(disk);
+	spin_unlock_irqrestore(&ev->lock, flags);
 	if (check_now)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, 0);
 	else if (intv)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, intv);
-out_unlock:
-	spin_unlock_irqrestore(&ev->lock, flags);
 }
 
 /**
@@ -1523,10 +1513,10 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
-	if (!ev->block)
+	spin_unlock_irq(&ev->lock);
+	if (!atomic_read(&ev->block))
 		mod_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, 0);
-	spin_unlock_irq(&ev->lock);
 }
 
 /**
@@ -1638,11 +1628,11 @@ static void disk_check_events(struct disk_events *ev,
 	*clearing_ptr &= ~clearing;
 
 	intv = disk_events_poll_jiffies(disk);
-	if (!ev->block && intv)
+	spin_unlock_irq(&ev->lock);
+	if (!atomic_read(&ev->block) && intv)
 		queue_delayed_work(system_freezable_power_efficient_wq,
 				&ev->dwork, intv);
-	spin_unlock_irq(&ev->lock);
 
 	/*
 	 * Tell userland about new events.  Only the events listed in
@@ -1807,7 +1797,7 @@ static void disk_alloc_events(struct gendisk *disk)
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
 	mutex_init(&ev->block_mutex);
-	ev->block = 1;
+	atomic_set(&ev->block, 1);
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
@@ -1851,6 +1841,6 @@ static void disk_del_events(struct gendisk *disk)
 static void disk_release_events(struct gendisk *disk)
 {
 	/* the block count should be 1 from disk_del_events() */
-	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
+	WARN_ON_ONCE(disk->ev && atomic_read(&disk->ev->block) != 1);
 	kfree(disk->ev);
 }
-- 
2.29.2