[PATCH 2/3] block: move blkcg_bio_issue_check out of line

blkcg_bio_issue_check() is way too big to be an inline function, so move it
next to the other blkcg-related functions in block/blk-cgroup.c.
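
The pattern is the usual one for un-inlining a helper: only a declaration
stays in the header and the body moves into a single translation unit, so
the code is emitted once instead of being duplicated into every caller.
A minimal sketch with a hypothetical example_check() (illustration only,
not the kernel code):

	/* foo.h -- before: the whole body lives in the header and is
	 * inlined into every caller */
	static inline bool example_check(int x)
	{
		/* ...large body... */
		return x > 0;
	}

	/* foo.h -- after: callers only see a declaration */
	bool example_check(int x);

	/* foo.c -- after: the body is compiled exactly once */
	bool example_check(int x)
	{
		/* ...large body... */
		return x > 0;
	}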

According to bloat-o-meter, its sole caller generic_make_request_checks()
shrinks from 2417 to 1881 bytes (~22%).

$ ./scripts/bloat-o-meter -t vmlinux.old vmlinux
add/remove: 1/0 grow/shrink: 1/1 up/down: 667/-539 (128)
Function                                     old     new   delta
blkcg_bio_issue_check                          -     664    +664
generic_make_request_checks.cold              45      48      +3
generic_make_request_checks                 2372    1833    -539
Total: Before=9624970, After=9625098, chg +0.00%

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@xxxxxxx>
---
 block/blk-cgroup.c         | 56 ++++++++++++++++++++++++++++++++++++
 include/linux/blk-cgroup.h | 58 +-------------------------------------
 2 files changed, 57 insertions(+), 57 deletions(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c5dc833212e1..9003c76124e8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1706,6 +1706,62 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 	set_notify_resume(current);
 }
 
+bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio)
+{
+	struct blkcg_gq *blkg;
+	bool throtl = false;
+
+	rcu_read_lock();
+
+	if (!bio->bi_blkg) {
+		char b[BDEVNAME_SIZE];
+
+		WARN_ONCE(1,
+			  "no blkg associated for bio on block-device: %s\n",
+			  bio_devname(bio, b));
+		bio_associate_blkg(bio);
+	}
+
+	blkg = bio->bi_blkg;
+
+	throtl = blk_throtl_bio(q, blkg, bio);
+
+	if (!throtl) {
+		struct blkg_iostat_set *bis;
+		int rwd, cpu;
+
+		if (op_is_discard(bio->bi_opf))
+			rwd = BLKG_IOSTAT_DISCARD;
+		else if (op_is_write(bio->bi_opf))
+			rwd = BLKG_IOSTAT_WRITE;
+		else
+			rwd = BLKG_IOSTAT_READ;
+
+		cpu = get_cpu();
+		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
+		u64_stats_update_begin(&bis->sync);
+
+		/*
+		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
+		 * is a split bio and we would have already accounted for the
+		 * size of the bio.
+		 */
+		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
+			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
+		bis->cur.ios[rwd]++;
+
+		u64_stats_update_end(&bis->sync);
+		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
+			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
+		put_cpu();
+	}
+
+	blkcg_bio_issue_init(bio);
+
+	rcu_read_unlock();
+	return !throtl;
+}
+
 /**
  * blkcg_add_delay - add delay to this blkg
  * @blkg: blkg of interest
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 333885133b1f..b356d4eed08d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -559,63 +559,6 @@ static inline void blkcg_bio_issue_init(struct bio *bio)
 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 }
 
-static inline bool blkcg_bio_issue_check(struct request_queue *q,
-					 struct bio *bio)
-{
-	struct blkcg_gq *blkg;
-	bool throtl = false;
-
-	rcu_read_lock();
-
-	if (!bio->bi_blkg) {
-		char b[BDEVNAME_SIZE];
-
-		WARN_ONCE(1,
-			  "no blkg associated for bio on block-device: %s\n",
-			  bio_devname(bio, b));
-		bio_associate_blkg(bio);
-	}
-
-	blkg = bio->bi_blkg;
-
-	throtl = blk_throtl_bio(q, blkg, bio);
-
-	if (!throtl) {
-		struct blkg_iostat_set *bis;
-		int rwd, cpu;
-
-		if (op_is_discard(bio->bi_opf))
-			rwd = BLKG_IOSTAT_DISCARD;
-		else if (op_is_write(bio->bi_opf))
-			rwd = BLKG_IOSTAT_WRITE;
-		else
-			rwd = BLKG_IOSTAT_READ;
-
-		cpu = get_cpu();
-		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
-		u64_stats_update_begin(&bis->sync);
-
-		/*
-		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
-		 * is a split bio and we would have already accounted for the
-		 * size of the bio.
-		 */
-		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
-			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
-		bis->cur.ios[rwd]++;
-
-		u64_stats_update_end(&bis->sync);
-		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
-			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
-		put_cpu();
-	}
-
-	blkcg_bio_issue_init(bio);
-
-	rcu_read_unlock();
-	return !throtl;
-}
-
 static inline void blkcg_use_delay(struct blkcg_gq *blkg)
 {
 	if (atomic_add_return(1, &blkg->use_delay) == 1)
@@ -666,6 +609,7 @@ static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
 	}
 }
 
+bool blkcg_bio_issue_check(struct request_queue *q, struct bio *bio);
 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
 void blkcg_maybe_throttle_current(void);
-- 
2.24.1



