On Wed, 2024-07-17 at 06:55 -1000, tj@xxxxxxxxxx wrote: > > External email : Please do not click links or open attachments until > you have verified the sender or the content. > Hello, > > Does something like the following work for you? > > Thanks. > > diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c > index 37e6cc91d576..ec1d191f5c83 100644 > --- a/block/blk-cgroup.c > +++ b/block/blk-cgroup.c > @@ -329,7 +329,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg > *blkcg, struct gendisk *disk, > INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn); > #endif > > -u64_stats_init(&blkg->iostat.sync); > for_each_possible_cpu(cpu) { > u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); > per_cpu_ptr(blkg->iostat_cpu, cpu)->blkg = blkg; > @@ -632,24 +631,26 @@ static void blkg_iostat_set(struct blkg_iostat > *dst, struct blkg_iostat *src) > static void __blkg_clear_stat(struct blkg_iostat_set *bis) > { > struct blkg_iostat cur = {0}; > -unsigned long flags; > > -flags = u64_stats_update_begin_irqsave(&bis->sync); > blkg_iostat_set(&bis->cur, &cur); > blkg_iostat_set(&bis->last, &cur); > -u64_stats_update_end_irqrestore(&bis->sync, flags); > } > > static void blkg_clear_stat(struct blkcg_gq *blkg) > { > +unsigned long flags; > int cpu; > > +raw_spin_lock_irqsave(&blkg_stat_lock, flags); > + > for_each_possible_cpu(cpu) { > struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu); > > __blkg_clear_stat(s); > } > __blkg_clear_stat(&blkg->iostat); > + > +raw_spin_unlock_irqrestore(&blkg_stat_lock, flags); > } > > static int blkcg_reset_stats(struct cgroup_subsys_state *css, > @@ -998,12 +999,10 @@ static void blkcg_iostat_update(struct blkcg_gq > *blkg, struct blkg_iostat *cur, > unsigned long flags; > > /* propagate percpu delta to global */ > -flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); > blkg_iostat_set(&delta, cur); > blkg_iostat_sub(&delta, last); > blkg_iostat_add(&blkg->iostat.cur, &delta); > blkg_iostat_add(last, &delta); > 
-u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); > } > > static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu) > @@ -1134,9 +1133,9 @@ static void blkcg_fill_root_iostats(void) > cpu_dkstats->sectors[STAT_DISCARD] << 9; > } > > -flags = u64_stats_update_begin_irqsave(&blkg->iostat.sync); > +raw_spin_lock_irqsave(&blkg_stat_lock, flags); > blkg_iostat_set(&blkg->iostat.cur, &tmp); > -u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); > +raw_spin_unlock_irqrestore(&blkg_stat_lock, flags); > } > } > > @@ -1145,7 +1144,6 @@ static void blkcg_print_one_stat(struct > blkcg_gq *blkg, struct seq_file *s) > struct blkg_iostat_set *bis = &blkg->iostat; > u64 rbytes, wbytes, rios, wios, dbytes, dios; > const char *dname; > -unsigned seq; > int i; > > if (!blkg->online) > @@ -1157,16 +1155,14 @@ static void blkcg_print_one_stat(struct > blkcg_gq *blkg, struct seq_file *s) > > seq_printf(s, "%s ", dname); > > -do { > -seq = u64_stats_fetch_begin(&bis->sync); > - > -rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; > -wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; > -dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; > -rios = bis->cur.ios[BLKG_IOSTAT_READ]; > -wios = bis->cur.ios[BLKG_IOSTAT_WRITE]; > -dios = bis->cur.ios[BLKG_IOSTAT_DISCARD]; > -} while (u64_stats_fetch_retry(&bis->sync, seq)); > +raw_spin_lock_irq(&blkg_stat_lock); > +rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; > +wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; > +dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; > +rios = bis->cur.ios[BLKG_IOSTAT_READ]; > +wios = bis->cur.ios[BLKG_IOSTAT_WRITE]; > +dios = bis->cur.ios[BLKG_IOSTAT_DISCARD]; > +raw_spin_unlock_irq(&blkg_stat_lock); > > if (rbytes || wbytes || rios || wios) { > seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu > dbytes=%llu dios=%llu", I think this will work (note: the final unlock should be raw_spin_unlock_irq(&blkg_stat_lock); with no flags argument), but as I mentioned before, this issue only occurs on 32-bit SMP systems. 
Replacing the u64_stats sync with a spinlock will increase overhead on 64-bit systems, because the u64_stats sync is a no-op on 64-bit systems. However, if that overhead is acceptable, we can proceed with this solution. -- boy.wu