From: Marcelo Tosatti <mtosatti@xxxxxxxxxx>

From the vmstat shepherd, for CPUs that have the tick stopped, do not
queue local work to flush the per-CPU vmstats: in that case the flush
is performed on return to userspace or when entering idle.

Also cancel any delayed work on the local CPU when entering idle on
nohz_full CPUs. Per-CPU pages can be freed remotely from housekeeping
CPUs.

Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>

---
 mm/vmstat.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 472175642bd9..3b9a497965b4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -29,6 +29,7 @@
 #include <linux/page_ext.h>
 #include <linux/page_owner.h>
 #include <linux/migrate.h>
+#include <linux/tick.h>
 
 #include "internal.h"
 
@@ -1990,19 +1991,23 @@ static void vmstat_update(struct work_struct *w)
  */
 void quiet_vmstat(void)
 {
+	struct delayed_work *dw;
+
 	if (system_state != SYSTEM_RUNNING)
 		return;
 
 	if (!is_vmstat_dirty())
 		return;
 
+	refresh_cpu_vm_stats(false);
+
 	/*
-	 * Just refresh counters and do not care about the pending delayed
-	 * vmstat_update. It doesn't fire that often to matter and canceling
-	 * it would be too expensive from this path.
-	 * vmstat_shepherd will take care about that for us.
+	 * If the tick is stopped, cancel any delayed work to avoid
+	 * interruptions to this CPU in the future.
 	 */
-	refresh_cpu_vm_stats(false);
+	dw = &per_cpu(vmstat_work, smp_processor_id());
+	if (delayed_work_pending(dw) && tick_nohz_tick_stopped())
+		cancel_delayed_work(dw);
 }
 
 /*
@@ -2024,6 +2029,9 @@ static void vmstat_shepherd(struct work_struct *w)
 	for_each_online_cpu(cpu) {
 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+		if (tick_nohz_tick_stopped_cpu(cpu))
+			continue;
+
 		if (!delayed_work_pending(dw) && per_cpu(vmstat_dirty, cpu))
 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
 
-- 
2.37.1
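
For readers outside the kernel tree, here is a minimal standalone toy
model of the per-CPU decision the reworked shepherd makes each interval.
The struct and function names below are illustrative only, not kernel
APIs; this is a sketch of the logic in the patched vmstat_shepherd()
loop, not the implementation itself.

/*
 * Toy model (plain userspace C): a CPU whose tick is stopped is
 * skipped by the shepherd, since it flushes its own counters on
 * return to userspace or on idle entry.
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_model {
	bool tick_stopped;	/* nohz_full CPU with its tick stopped */
	bool vmstat_dirty;	/* per-CPU counters need flushing */
	bool work_pending;	/* flush work already queued */
};

static bool shepherd_queues_work(const struct cpu_model *c)
{
	if (c->tick_stopped)
		return false;
	return !c->work_pending && c->vmstat_dirty;
}

int main(void)
{
	struct cpu_model isolated = { .tick_stopped = true, .vmstat_dirty = true };
	struct cpu_model housekeeping = { .vmstat_dirty = true };

	/* Prints "isolated: 0 housekeeping: 1": only the housekeeping
	 * CPU gets remote flush work queued to it. */
	printf("isolated: %d housekeeping: %d\n",
	       shepherd_queues_work(&isolated),
	       shepherd_queues_work(&housekeeping));
	return 0;
}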