The patch titled
     Subject: vmstat: switch per-cpu vmstat counters to 32-bits
has been added to the -mm mm-unstable branch.  Its filename is
     vmstat-switch-per-cpu-vmstat-counters-to-32-bits.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/vmstat-switch-per-cpu-vmstat-counters-to-32-bits.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Subject: vmstat: switch per-cpu vmstat counters to 32-bits
Date: Mon, 13 Mar 2023 13:25:15 -0300

Some architectures only provide xchg/cmpxchg in 32/64-bit quantities.
Since the next patch is about to use xchg on per-CPU vmstat counters,
switch them to s32.

Link: https://lkml.kernel.org/r/20230313162634.458451052@xxxxxxxxxx
Signed-off-by: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Cc: Aaron Tomlin <atomlin@xxxxxxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Heiko Carstens <hca@xxxxxxxxxxxxx>
Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Cc: Lorenzo Stoakes <lstoakes@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Peter Xu <peterx@xxxxxxxxxx>
Cc: "Russell King (Oracle)" <linux@xxxxxxxxxxxxxxx>
Cc: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/include/linux/mmzone.h~vmstat-switch-per-cpu-vmstat-counters-to-32-bits
+++ a/include/linux/mmzone.h
@@ -689,8 +689,8 @@ struct per_cpu_pages {
 
 struct per_cpu_zonestat {
 #ifdef CONFIG_SMP
-	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
-	s8 stat_threshold;
+	s32 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+	s32 stat_threshold;
 #endif
 #ifdef CONFIG_NUMA
 	/*
@@ -703,8 +703,8 @@ struct per_cpu_zonestat {
 };
 
 struct per_cpu_nodestat {
-	s8 stat_threshold;
-	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
+	s32 stat_threshold;
+	s32 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
 };
 
 #endif /* !__GENERATING_BOUNDS.H */
--- a/mm/vmstat.c~vmstat-switch-per-cpu-vmstat-counters-to-32-bits
+++ a/mm/vmstat.c
@@ -351,7 +351,7 @@ static inline void mod_zone_state(struct
 			   long delta, int overstep_mode)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
 	long o, n, t, z;
 
 	do {
@@ -428,7 +428,7 @@ static inline void mod_node_state(struct
 				    int delta, int overstep_mode)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
 	long o, n, t, z;
 
 	if (vmstat_item_in_bytes(item)) {
@@ -525,7 +525,7 @@ void __mod_zone_page_state(struct zone *
 				long delta)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
 	long x;
 	long t;
 
@@ -556,7 +556,7 @@ void __mod_node_page_state(struct pglist
 				long delta)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
 	long x;
 	long t;
 
@@ -614,8 +614,8 @@ EXPORT_SYMBOL(__mod_node_page_state);
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
+	s32 v, t;
 
 	/* See __mod_node_page_state */
 	preempt_disable_nested();
@@ -623,7 +623,7 @@ void __inc_zone_state(struct zone *zone,
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
@@ -635,8 +635,8 @@ void __inc_zone_state(struct zone *zone,
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 v, t;
 
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -646,7 +646,7 @@ void __inc_node_state(struct pglist_data
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
@@ -670,8 +670,8 @@ EXPORT_SYMBOL(__inc_node_page_state);
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
-	s8 __percpu *p = pcp->vm_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_stat_diff + item;
+	s32 v, t;
 
 	/* See __mod_node_page_state */
 	preempt_disable_nested();
@@ -679,7 +679,7 @@ void __dec_zone_state(struct zone *zone,
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
@@ -691,8 +691,8 @@ void __dec_zone_state(struct zone *zone,
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 {
 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
-	s8 __percpu *p = pcp->vm_node_stat_diff + item;
-	s8 v, t;
+	s32 __percpu *p = pcp->vm_node_stat_diff + item;
+	s32 v, t;
 
 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
 
@@ -702,7 +702,7 @@ void __dec_node_state(struct pglist_data
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
-		s8 overstep = t >> 1;
+		s32 overstep = t >> 1;
 
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
_

Patches currently in -mm which might be from mtosatti@xxxxxxxxxx are

this_cpu_cmpxchg-arm64-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
this_cpu_cmpxchg-loongarch-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
this_cpu_cmpxchg-s390-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
this_cpu_cmpxchg-x86-switch-this_cpu_cmpxchg-to-locked-add-_local-function.patch
add-this_cpu_cmpxchg_local-and-asm-generic-definitions.patch
convert-this_cpu_cmpxchg-users-to-this_cpu_cmpxchg_local.patch
mm-vmstat-switch-counter-modification-to-cmpxchg.patch
vmstat-switch-per-cpu-vmstat-counters-to-32-bits.patch
mm-vmstat-use-xchg-in-cpu_vm_stats_fold.patch
mm-vmstat-switch-vmstat-shepherd-to-flush-per-cpu-counters-remotely.patch
mm-vmstat-refresh-stats-remotely-instead-of-via-work-item.patch
vmstat-add-pcp-remote-node-draining-via-cpu_vm_stats_fold.patch
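For context on why the s8 -> s32 widening is needed: the changelog notes
that a following patch (mm-vmstat-use-xchg-in-cpu_vm_stats_fold.patch,
listed above) will use xchg() on these per-CPU counters, and xchg()/cmpxchg()
are only guaranteed to exist for 32/64-bit operands on every architecture.
Below is a minimal sketch of that kind of access pattern; it assumes ordinary
kernel context (<linux/atomic.h>), and the function name and the global
atomic_long_t counter are invented for illustration, not taken from the series.

/*
 * Illustrative sketch only, not code from this series: with the per-CPU
 * diff widened to s32, a flush path can take the pending delta with a
 * word-sized xchg(), which all architectures provide; an s8 member could
 * not be handled this way portably.
 */
static long fold_one_diff(s32 *diff, atomic_long_t *global)
{
	long v = xchg(diff, 0);		/* atomically claim the pending delta */

	if (v)
		atomic_long_add(v, global);	/* fold it into the global counter */
	return v;
}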