From: "Dennis Zhou (Facebook)" <dennisszhou@xxxxxxxxx> blkg reference counting now uses percpu_ref rather than atomic_t. Let's make this consistent with css_tryget. This renames blkg_try_get to blkg_tryget and now returns a bool rather than the blkg or NULL. Signed-off-by: Dennis Zhou <dennisszhou@xxxxxxxxx> Reviewed-by: Josef Bacik <josef@xxxxxxxxxxxxxx> Acked-by: Tejun Heo <tj@xxxxxxxxxx> --- block/bio.c | 2 +- block/blk-cgroup.c | 3 +-- block/blk-iolatency.c | 2 +- include/linux/blk-cgroup.h | 14 ++++++-------- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/block/bio.c b/block/bio.c index 3cc8fcd8b827..bed3328c92ff 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1971,7 +1971,7 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg) { if (unlikely(bio->bi_blkg)) return -EBUSY; - bio->bi_blkg = blkg_try_get_closest(blkg); + bio->bi_blkg = blkg_tryget_closest(blkg); return 0; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1efd697c9019..0c9aeb30ba8e 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1793,8 +1793,7 @@ void blkcg_maybe_throttle_current(void) blkg = blkg_lookup(blkcg, q); if (!blkg) goto out; - blkg = blkg_try_get(blkg); - if (!blkg) + if (!blkg_tryget(blkg)) goto out; rcu_read_unlock(); diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 9d7052bad6f7..5a4cec54c998 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -628,7 +628,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) * We could be exiting, don't access the pd unless we have a * ref on the blkg. */ - if (!blkg_try_get(blkg)) + if (!blkg_tryget(blkg)) continue; iolat = blkg_to_lat(blkg); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index f8edff271a17..b5bbd9bdf37e 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -494,27 +494,25 @@ static inline void blkg_get(struct blkcg_gq *blkg) } /** - * blkg_try_get - try and get a blkg reference + * blkg_tryget - try and get a blkg reference * @blkg: blkg to get * * This is for use when doing an RCU lookup of the blkg. We may be in the midst * of freeing this blkg, so we can only use it if the refcnt is not zero. */ -static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg) +static inline bool blkg_tryget(struct blkcg_gq *blkg) { - if (percpu_ref_tryget(&blkg->refcnt)) - return blkg; - return NULL; + return percpu_ref_tryget(&blkg->refcnt); } /** - * blkg_try_get_closest - try and get a blkg ref on the closet blkg + * blkg_tryget_closest - try and get a blkg ref on the closet blkg * @blkg: blkg to get * * This walks up the blkg tree to find the closest non-dying blkg and returns * the blkg that it did association with as it may not be the passed in blkg. */ -static inline struct blkcg_gq *blkg_try_get_closest(struct blkcg_gq *blkg) +static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) { while (!percpu_ref_tryget(&blkg->refcnt)) blkg = blkg->parent; @@ -599,7 +597,7 @@ static inline struct request_list *blk_get_rl(struct request_queue *q, if (unlikely(!blkg)) blkg = __blkg_lookup_create(blkcg, q); - if (!blkg_try_get(blkg)) + if (!blkg_tryget(blkg)) goto rl_use_root; rcu_read_unlock(); -- 2.17.1