The queue can flip to dying after we check if it is dying, and then we call __blk_get_queue(). This is a purely theoretical race, but just fix it. We do this by using the atomic kobject_get_unless_zero() first, and *then* checking if the queue is dying *after*. This issue was found while doing patch review on the recent blktrace fixes [0]. [0] https://lore.kernel.org/linux-block/20200415123434.GU11244@xxxxxxxxxxxxxxxxxxx/ Reported-by: Christoph Hellwig <hch@xxxxxxxxxxxxx> Cc: Jan Kara <jack@xxxxxxx> Cc: Ming Lei <ming.lei@xxxxxxxxxx> Cc: Bart Van Assche <bvanassche@xxxxxxx> Cc: Christoph Hellwig <hch@xxxxxxxxxxxxx> Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx> --- This was tested against blktests without finding a regression. block/blk-core.c | 14 ++++++++++---- block/blk.h | 5 +++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index d9d632639bd1..febdd8e8d409 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -605,12 +605,18 @@ EXPORT_SYMBOL(blk_alloc_queue); */ bool blk_get_queue(struct request_queue *q) { - if (likely(!blk_queue_dying(q))) { - __blk_get_queue(q); - return true; + struct kobject *obj; + + obj = __blk_get_queue(q); + if (!obj) + return false; + + if (unlikely(blk_queue_dying(q))) { + blk_put_queue(q); + return false; } - return false; + return true; } EXPORT_SYMBOL(blk_get_queue); diff --git a/block/blk.h b/block/blk.h index 49e2928a1632..bdbc9b084d5b 100644 --- a/block/blk.h +++ b/block/blk.h @@ -39,9 +39,10 @@ blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; } -static inline void __blk_get_queue(struct request_queue *q) +static inline struct kobject * __must_check +__blk_get_queue(struct request_queue *q) { - kobject_get(&q->kobj); + return kobject_get_unless_zero(&q->kobj); } static inline bool -- 2.27.0