On Thu, Jul 12, 2018 at 01:29:40PM -0400, Johannes Weiner wrote:
> +static bool psi_update_stats(struct psi_group *group)
> +{
> +	for_each_online_cpu(cpu) {
> +		struct psi_group_cpu *groupc = per_cpu_ptr(group->cpus, cpu);
> +		unsigned long nonidle;
> +
> +		if (!groupc->nonidle_time)
> +			continue;
> +
> +		nonidle = nsecs_to_jiffies(groupc->nonidle_time);
> +		groupc->nonidle_time = 0;
> +		nonidle_total += nonidle;
> +
> +		for (r = 0; r < NR_PSI_RESOURCES; r++) {
> +			struct psi_resource *res = &groupc->res[r];
> +
> +			some[r] += (res->times[0] + res->times[1]) * nonidle;
> +			full[r] += res->times[1] * nonidle;
> +
> +			/* It's racy, but we can tolerate some error */
> +			res->times[0] = 0;
> +			res->times[1] = 0;
> +		}
> +	}

An alternative to this, one that still allows that on-demand update but
without spamming rq->lock, would be something like:

	struct psi_group_cpu {
		u32 tasks[3];

		u32 cpu_state : 2;
		u32 mem_state : 2;
		u32 io_state  : 2;
		u32 : 0;

		u64 last_update_time;

		u32 nonidle;
		u32 full[2];
		u32 some[3];
	} ____cacheline_aligned_in_smp;

	/* Allocate _2_ copies: an active one and a shadow one */
	DEFINE_PER_CPU_SHARED_ALIGNED(struct psi_group_cpu[2], psi_cpus);

	struct psi_group global_psi = {
		.cpus = &psi_cpus[0],
	};

	u64 sums[6] = { 0, };

	for_each_possible_cpu(cpu) {
		struct psi_group_cpu *pgc = per_cpu_ptr(group->cpus, cpu);
		u32 *active, *shadow;
		u32 delta;
		int i;

		active = &pgc[0].nonidle;
		shadow = &pgc[1].nonidle;

		/*
		 * Compare the active counts to the shadow counts; where
		 * they differ, compute the delta and update the shadow
		 * copy.
		 *
		 * This only writes to the shadow copy (a separate
		 * cacheline) and leaves the active copy a read-only
		 * access.
		 */
		for (i = 0; i < 6; i++) {
			u32 old = READ_ONCE(shadow[i]);
			u32 new = READ_ONCE(active[i]);

			delta = new - old;
			if (!delta) {
				/* no nonidle delta at all -> skip this CPU */
				if (!i)
					goto next;
				continue;
			}

			WRITE_ONCE(shadow[i], new);
			sums[i] += delta;
		}
next:		;
	}
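
To make the split concrete, here is a rough sketch of what the write
side could look like with such a layout; the function name, the
resource indexing and the exact accounting below are my own assumptions
for illustration, not something from the patch set. The only point is
that the scheduler hot path, which already holds rq->lock, touches only
the active copy pgc[0] and never the shadow copy pgc[1], so the
aggregation loop above never needs to take that lock:

	/*
	 * Hypothetical hot-path update: account @ns nanoseconds of
	 * stall time into the active copy only. The aggregator above
	 * only reads this cacheline and writes its bookkeeping to the
	 * shadow copy on a separate cacheline, so it never dirties the
	 * line the scheduler is writing to.
	 */
	static void psi_account(struct psi_group *group, int cpu, u32 ns)
	{
		struct psi_group_cpu *pgc = per_cpu_ptr(group->cpus, cpu);

		pgc[0].nonidle += ns;

		if (pgc[0].mem_state)		/* tasks stalled on memory */
			pgc[0].some[1] += ns;	/* [1] == memory, assumed ordering */
	}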