On 1/7/25 12:00 PM, Christoph Hellwig wrote:
> When __blk_mq_update_nr_hw_queues changes the number of tag sets, it
> might have to disable poll queues. Currently it does so by adjusting
> the BLK_FEAT_POLL, which is a bit against the intent of features that
> describe hardware / driver capabilities, but more importantly causes
> nasty lock order problems with the broadly held freeze when updating the
> number of hardware queues and the limits lock. Fix this by leaving
> BLK_FEAT_POLL alone, and instead check for the number of poll queues in
> the bio submission and poll handlers. While this adds extra work to the
> fast path, the variables are in cache lines used by these operations
> anyway, so it should be cheap enough.
>
> Fixes: 8023e144f9d6 ("block: move the poll flag to queue_limits")
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> ---
>  block/blk-core.c | 17 ++++++++++++++---
>  block/blk-mq.c   | 17 +----------------
>  2 files changed, 15 insertions(+), 19 deletions(-)
>
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 666efe8fa202..bd5bec843d37 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -753,6 +753,18 @@ static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
>  	return BLK_STS_OK;
>  }
>
> +static bool bdev_can_poll(struct block_device *bdev)
> +{
> +	struct request_queue *q = bdev_get_queue(bdev);
> +
> +	if (!(q->limits.features & BLK_FEAT_POLL))
> +		return false;
> +
> +	if (queue_is_mq(q))
> +		return q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
> +	return true;
> +}
> +

As discussed with Damien in another thread, shouldn't we move
bdev_can_poll() to a header file? We would also need this function when
reading the sysfs attribute "io_poll", no? (A rough sketch of what I
mean follows at the end of this mail.)

>  /**
>   * submit_bio_noacct - re-submit a bio to the block device layer for I/O
>   * @bio: The bio describing the location in memory and on the device.
> @@ -805,8 +817,7 @@ void submit_bio_noacct(struct bio *bio)
>  		}
>  	}
>
> -	if (!(q->limits.features & BLK_FEAT_POLL) &&
> -	    (bio->bi_opf & REQ_POLLED)) {
> +	if ((bio->bi_opf & REQ_POLLED) && !bdev_can_poll(bdev)) {
>  		bio_clear_polled(bio);
>  		goto not_supported;
>  	}
> @@ -935,7 +946,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
>  		return 0;
>
>  	q = bdev_get_queue(bdev);
> -	if (cookie == BLK_QC_T_NONE || !(q->limits.features & BLK_FEAT_POLL))
> +	if (cookie == BLK_QC_T_NONE || !bdev_can_poll(bdev))
>  		return 0;
>
>  	blk_flush_plug(current->plug, false);
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 2e6132f778fd..f795d81b6b38 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -4320,12 +4320,6 @@ void blk_mq_release(struct request_queue *q)
>  	blk_mq_sysfs_deinit(q);
>  }
>
> -static bool blk_mq_can_poll(struct blk_mq_tag_set *set)
> -{
> -	return set->nr_maps > HCTX_TYPE_POLL &&
> -	       set->map[HCTX_TYPE_POLL].nr_queues;
> -}
> -
>  struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
>  		struct queue_limits *lim, void *queuedata)
>  {
> @@ -4336,7 +4330,7 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
>  	if (!lim)
>  		lim = &default_lim;
>  	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
> -	if (blk_mq_can_poll(set))
> +	if (set->nr_maps > HCTX_TYPE_POLL)
>  		lim->features |= BLK_FEAT_POLL;
>
>  	q = blk_alloc_queue(lim, set->numa_node);
> @@ -5024,8 +5018,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
>  fallback:
>  	blk_mq_update_queue_map(set);
>  	list_for_each_entry(q, &set->tag_list, tag_set_list) {
> -		struct queue_limits lim;
> -
>  		blk_mq_realloc_hw_ctxs(set, q);
>
>  		if (q->nr_hw_queues != set->nr_hw_queues) {
> @@ -5039,13 +5031,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
>  			set->nr_hw_queues = prev_nr_hw_queues;
>  			goto fallback;
>  		}
> -		lim = queue_limits_start_update(q);
> -		if (blk_mq_can_poll(set))
> -			lim.features |= BLK_FEAT_POLL;
> -		else
> -			lim.features &= ~BLK_FEAT_POLL;
> -		if (queue_limits_commit_update(q, &lim) < 0)
> -			pr_warn("updating the poll flag failed\n");
>  		blk_mq_map_swqueue(q);
>  	}
>
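To make the suggestion concrete, here is a rough, untested sketch of
what I have in mind. The placement in block/blk.h and the show()
signature below are assumptions on my side; the actual blk-sysfs.c
plumbing may look different:

/*
 * Hypothetical placement, e.g. in block/blk.h, so that blk-core.c and
 * blk-sysfs.c share one definition instead of duplicating the check.
 */
static inline bool bdev_can_poll(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!(q->limits.features & BLK_FEAT_POLL))
		return false;

	/* for blk-mq, polling also needs at least one mapped poll queue */
	if (queue_is_mq(q))
		return q->tag_set->map[HCTX_TYPE_POLL].nr_queues;
	return true;
}

/*
 * Sketch of the "io_poll" read side, using the whole-device bdev
 * (disk->part0); the show() signature here is assumed.
 */
static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%d\n", bdev_can_poll(disk->part0));
}

That way "io_poll" would keep reporting what the submission and poll
paths actually check, rather than BLK_FEAT_POLL alone, which after this
patch no longer tells us whether poll queues are really available once
the number of hardware queues has been updated.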