On 11/4/21 12:40 PM, Jens Axboe wrote: > On 11/4/21 12:39 PM, Christoph Hellwig wrote: >> On Thu, Nov 04, 2021 at 12:37:25PM -0600, Jens Axboe wrote: >>> On 11/4/21 12:36 PM, Christoph Hellwig wrote: >>>>> +static inline bool blk_mq_queue_enter(struct request_queue *q, struct bio *bio) >>>>> +{ >>>>> + if (!blk_try_enter_queue(q, false) && bio_queue_enter(bio)) >>>>> + return false; >>>>> + return true; >>>>> +} >>>> >>>> Didn't we just agree on splitting bio_queue_enter into an inline helper >>>> and an out of line slowpath instead? >>> >>> See cover letter, and I also added to the commit message of this one. I do >>> think this approach is better, as bio_queue_enter() itself is just slow >>> path and there's no point polluting the code with 90% of what's in there. >>> >>> Hence I kept it as-is. >> >> Well, let me reword this then: why do you think the above is >> blk-mq specific and should not be used by every other caller of >> bio_queue_enter as well? In other words, why not rename >> bio_queue_enter __bio_queue_enter and make the above the public >> bio_queue_enter interface then? > > OK, that I can agree too. I'll respin it as such. Gets the job done as > well. Ala: diff --git a/block/blk-core.c b/block/blk-core.c index c2d267b6f910..0084067949d8 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -386,30 +386,6 @@ void blk_cleanup_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_cleanup_queue); -static bool blk_try_enter_queue(struct request_queue *q, bool pm) -{ - rcu_read_lock(); - if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter)) - goto fail; - - /* - * The code that increments the pm_only counter must ensure that the - * counter is globally visible before the queue is unfrozen. 
- */ - if (blk_queue_pm_only(q) && - (!pm || queue_rpm_status(q) == RPM_SUSPENDED)) - goto fail_put; - - rcu_read_unlock(); - return true; - -fail_put: - blk_queue_exit(q); -fail: - rcu_read_unlock(); - return false; -} - /** * blk_queue_enter() - try to increase q->q_usage_counter * @q: request queue pointer @@ -442,10 +418,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) return 0; } -static inline int bio_queue_enter(struct bio *bio) +int __bio_queue_enter(struct request_queue *q, struct bio *bio) { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - while (!blk_try_enter_queue(q, false)) { struct gendisk *disk = bio->bi_bdev->bd_disk; diff --git a/block/blk.h b/block/blk.h index 7afffd548daf..814d9632d43e 100644 --- a/block/blk.h +++ b/block/blk.h @@ -55,6 +55,40 @@ void blk_free_flush_queue(struct blk_flush_queue *q); void blk_freeze_queue(struct request_queue *q); void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic); void blk_queue_start_drain(struct request_queue *q); +int __bio_queue_enter(struct request_queue *q, struct bio *bio); + +static inline bool blk_try_enter_queue(struct request_queue *q, bool pm) +{ + rcu_read_lock(); + if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter)) + goto fail; + + /* + * The code that increments the pm_only counter must ensure that the + * counter is globally visible before the queue is unfrozen. + */ + if (blk_queue_pm_only(q) && + (!pm || queue_rpm_status(q) == RPM_SUSPENDED)) + goto fail_put; + + rcu_read_unlock(); + return true; + +fail_put: + blk_queue_exit(q); +fail: + rcu_read_unlock(); + return false; +} + +static inline int bio_queue_enter(struct bio *bio) +{ + struct request_queue *q = bdev_get_queue(bio->bi_bdev); + + if (blk_try_enter_queue(q, false)) + return 0; + return __bio_queue_enter(q, bio); +} #define BIO_INLINE_VECS 4 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, -- Jens Axboe