Hello,

On Wed, Jul 28, 2021 at 05:01:41PM +0800, brookxu wrote:
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index a11b3b5..86ff943 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
> 		trace_block_split(split, (*bio)->bi_iter.bi_sector);
> 		submit_bio_noacct(*bio);
> 		*bio = split;
> +
> +		blk_throtl_recharge_bio(*bio);

Can you rename this blk_throtl_charge_bio_split()?

> @@ -524,6 +537,11 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
> 	tg->idletime_threshold = DFL_IDLE_THRESHOLD;
> 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
>
> +	atomic_set(&tg->io_split_cnt[0], 0);
> +	atomic_set(&tg->io_split_cnt[1], 0);
> +	atomic_set(&tg->last_io_split_cnt[0], 0);
> +	atomic_set(&tg->last_io_split_cnt[1], 0);

We likely don't need these. pd's are zeroed on allocation.

> @@ -877,10 +900,19 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
> 	else
> 		tg->bytes_disp[rw] = 0;
>
> -	if (tg->io_disp[rw] >= io_trim)
> +	if (tg_io_disp(tg, rw) >= io_trim) {

Instead of checking this in multiple places, would it be simpler to
transfer the atomic counters into the existing counters whenever we
enter blk-throtl and leave the rest of the code as-is?

Thanks.

--
tejun
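
For illustration, here is a minimal sketch of the "transfer on entry"
idea suggested above. The helper name and its call site are assumptions
made for this sketch, not part of the posted patch; only tg->io_disp[]
and tg->io_split_cnt[] come from the patch itself.

static void tg_update_disp(struct throtl_grp *tg)
{
	int rw;

	/*
	 * Sketch only: fold the split counts accumulated outside
	 * blk-throtl into the regular dispatch counters and reset them,
	 * so the rest of the code can keep reading tg->io_disp[]
	 * directly instead of going through a tg_io_disp() wrapper.
	 */
	for (rw = READ; rw <= WRITE; rw++)
		tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
}

Calling something like this once where blk-throtl starts accounting a
group (for example at the top of throtl_trim_slice(), quoted above)
would let the existing tg->io_disp[rw] checks stay unchanged.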