On Mon 12-07-21 04:05:48, Matthew Wilcox (Oracle) wrote:
> When batching events (such as writing back N pages in a single I/O), it
> is better to do one flex_proportion operation instead of N.  There is
> only one caller of __fprop_inc_percpu_max(), and it's the one we're
> going to change in the next patch, so rename it instead of adding a
> compatibility wrapper.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
> Reviewed-by: Christoph Hellwig <hch@xxxxxx>

Looks good. You can add:

Reviewed-by: Jan Kara <jack@xxxxxxx>

								Honza

> ---
>  include/linux/flex_proportions.h |  9 +++++----
>  lib/flex_proportions.c           | 28 +++++++++++++++++++---------
>  mm/page-writeback.c              |  4 ++--
>  3 files changed, 26 insertions(+), 15 deletions(-)
> 
> diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
> index c12df59d3f5f..3e378b1fb0bc 100644
> --- a/include/linux/flex_proportions.h
> +++ b/include/linux/flex_proportions.h
> @@ -83,9 +83,10 @@ struct fprop_local_percpu {
>  
>  int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
>  void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
> -void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
> -void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
> -		int max_frac);
> +void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
> +		long nr);
> +void __fprop_add_percpu_max(struct fprop_global *p,
> +		struct fprop_local_percpu *pl, int max_frac, long nr);
>  void fprop_fraction_percpu(struct fprop_global *p,
>  	struct fprop_local_percpu *pl, unsigned long *numerator,
>  	unsigned long *denominator);
> @@ -96,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
>  	unsigned long flags;
>  
>  	local_irq_save(flags);
> -	__fprop_inc_percpu(p, pl);
> +	__fprop_add_percpu(p, pl, 1);
>  	local_irq_restore(flags);
>  }
>  
> diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
> index 451543937524..53e7eb1dd76c 100644
> --- a/lib/flex_proportions.c
> +++ b/lib/flex_proportions.c
> @@ -217,11 +217,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
>  }
>  
>  /* Event of type pl happened */
> -void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
> +void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
> +		long nr)
>  {
>  	fprop_reflect_period_percpu(p, pl);
> -	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
> -	percpu_counter_add(&p->events, 1);
> +	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
> +	percpu_counter_add(&p->events, nr);
>  }
>  
>  void fprop_fraction_percpu(struct fprop_global *p,
> @@ -253,20 +254,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
>  }
>  
>  /*
> - * Like __fprop_inc_percpu() except that event is counted only if the given
> + * Like __fprop_add_percpu() except that event is counted only if the given
>   * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
>   */
> -void __fprop_inc_percpu_max(struct fprop_global *p,
> -	struct fprop_local_percpu *pl, int max_frac)
> +void __fprop_add_percpu_max(struct fprop_global *p,
> +	struct fprop_local_percpu *pl, int max_frac, long nr)
>  {
>  	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
>  		unsigned long numerator, denominator;
> +		s64 tmp;
>  
>  		fprop_fraction_percpu(p, pl, &numerator, &denominator);
> -		if (numerator >
> -		    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
> +		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
> +		tmp = (u64)denominator * max_frac -
> +			((u64)numerator << FPROP_FRAC_SHIFT);
> +		if (tmp < 0) {
> +			/* Maximum fraction already exceeded? */
>  			return;
> +		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
> +			/* Add just enough for the fraction to saturate */
> +			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
> +					FPROP_FRAC_BASE - max_frac);
> +		}
>  	}
>  
> -	__fprop_inc_percpu(p, pl);
> +	__fprop_add_percpu(p, pl, nr);
>  }
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index e677e79c7b9b..63c0dd9f8bf7 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -566,8 +566,8 @@ static void wb_domain_writeout_inc(struct wb_domain *dom,
>  				       struct fprop_local_percpu *completions,
>  				       unsigned int max_prop_frac)
>  {
> -	__fprop_inc_percpu_max(&dom->completions, completions,
> -			       max_prop_frac);
> +	__fprop_add_percpu_max(&dom->completions, completions,
> +			       max_prop_frac, 1);
>  	/* First event after period switching was turned off? */
>  	if (unlikely(!dom->period_time)) {
>  		/*
> -- 
> 2.30.2
> 

-- 
Jan Kara <jack@xxxxxxxx>
SUSE Labs, CR
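
[Editor's note: the saturation arithmetic in __fprop_add_percpu_max() is easiest to see with concrete numbers. The userspace sketch below is an illustration only, not part of the patch; it mirrors the clamping logic under the assumption that FPROP_FRAC_SHIFT is 10, its value in include/linux/flex_proportions.h.]

/*
 * Illustration (not part of the patch): model of the clamping in
 * __fprop_add_percpu_max(), assuming FPROP_FRAC_SHIFT == 10.
 */
#include <stdio.h>
#include <stdint.h>

#define FRAC_SHIFT	10
#define FRAC_BASE	(1UL << FRAC_SHIFT)

/*
 * Return how many of 'nr' events may be counted without letting the
 * fraction num/den exceed max_frac/FRAC_BASE once both num and den
 * have grown by the counted amount.
 */
static long clamp_events(unsigned long num, unsigned long den,
			 int max_frac, long nr)
{
	int64_t tmp = (uint64_t)den * max_frac - ((uint64_t)num << FRAC_SHIFT);

	if (tmp < 0)
		return 0;		/* fraction already over the cap */
	if (tmp < nr * (int64_t)(FRAC_BASE - max_frac))
		/* count just enough for the fraction to saturate */
		return (tmp + FRAC_BASE - max_frac - 1) / (FRAC_BASE - max_frac);
	return nr;			/* the whole batch fits under the cap */
}

int main(void)
{
	/* 700 of 1000 events so far, cap of 768/1024 (75%), batch of 1000 */
	long counted = clamp_events(700, 1000, 768, 1000);

	/* prints 200: (700 + 200) / (1000 + 200) == 0.75, exactly the cap */
	printf("events counted: %ld\n", counted);
	return 0;
}

[With these example numbers a batch of 1000 completions is trimmed to 200, the largest count that keeps the per-type fraction at the 75% cap; smaller batches that stay under the cap are counted in full, and a type already over the cap contributes nothing, matching the old single-event behaviour.]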