Instead of accessing zoneref->zone directly, use zonelist_zone() like
other places for consistency. No functional change.

Signed-off-by: Wei Yang <richard.weiyang@xxxxxxxxx>
CC: Mike Rapoport (IBM) <rppt@xxxxxxxxxx>
CC: David Hildenbrand <david@xxxxxxxxxx>
CC: Garg Shivank <shivankg@xxxxxxx>

---
v2: cover more usage
---
 include/linux/mmzone.h     |  4 ++--
 include/trace/events/oom.h |  2 +-
 mm/mempolicy.c             |  4 ++--
 mm/page_alloc.c            | 14 +++++++-------
 4 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cb7f265c2b96..51bce636373f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1690,7 +1690,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 			zone = zonelist_zone(z))
 
 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
-	for (zone = z->zone;						\
+	for (zone = zonelist_zone(z);					\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
 			zone = zonelist_zone(z))
 
@@ -1726,7 +1726,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes)
 	nid = first_node(*nodes);
 	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
 	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
-	return (!z->zone) ? true : false;
+	return (!zonelist_zone(z)) ? true : false;
 }
 
diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h
index a42be4c8563b..fe6997886b77 100644
--- a/include/trace/events/oom.h
+++ b/include/trace/events/oom.h
@@ -55,7 +55,7 @@ TRACE_EVENT(reclaim_retry_zone,
 	),
 
 	TP_fast_assign(
-		__entry->node = zone_to_nid(zoneref->zone);
+		__entry->node = zone_to_nid(zonelist_zone(zoneref));
 		__entry->zone_idx = zoneref->zone_idx;
 		__entry->order = order;
 		__entry->reclaimable = reclaimable;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f73acb01ad45..83e26ded6278 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1951,7 +1951,7 @@ unsigned int mempolicy_slab_node(void)
 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
 		z = first_zones_zonelist(zonelist, highest_zoneidx,
 							&policy->nodes);
-		return z->zone ? zone_to_nid(z->zone) : node;
+		return zonelist_zone(z) ? zone_to_nid(zonelist_zone(z)) : node;
 	}
 	case MPOL_LOCAL:
 		return node;
@@ -2806,7 +2806,7 @@ int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
 				node_zonelist(thisnid, GFP_HIGHUSER),
 				gfp_zone(GFP_HIGHUSER),
 				&pol->nodes);
-		polnid = zone_to_nid(z->zone);
+		polnid = zone_to_nid(zonelist_zone(z));
 		break;
 
 	default:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 116ee33fd1ce..e2933885bb19 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4218,7 +4218,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
+	if (!zonelist_zone(ac->preferred_zoneref))
 		goto nopage;
 
 	/*
@@ -4230,7 +4230,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		struct zoneref *z = first_zones_zonelist(ac->zonelist,
 					ac->highest_zoneidx,
 					&cpuset_current_mems_allowed);
-		if (!z->zone)
+		if (!zonelist_zone(z))
 			goto nopage;
 	}
 
@@ -4587,8 +4587,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
-		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
+		if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
+		    zone_to_nid(zone) != zone_to_nid(zonelist_zone(ac.preferred_zoneref))) {
 			goto failed;
 		}
 
@@ -4647,7 +4647,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
 	pcp_trylock_finish(UP_flags);
 
 	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
-	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+	zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
 
 out:
 	return nr_populated;
@@ -4705,7 +4705,7 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
+	alloc_flags |= alloc_flags_nofragment(zonelist_zone(ac.preferred_zoneref), gfp);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
@@ -5310,7 +5310,7 @@ int local_memory_node(int node)
 	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
 				   gfp_zone(GFP_KERNEL),
 				   NULL);
-	return zone_to_nid(z->zone);
+	return zone_to_nid(zonelist_zone(z));
 }
 #endif
-- 
2.34.1
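
For review context, a minimal sketch of the accessor the call sites are
converted to, assuming the current definition in include/linux/mmzone.h:

	/* Plain accessor: return the zone this zoneref entry points at
	 * (NULL for the terminating entry at the end of a zonelist). */
	static inline struct zone *zonelist_zone(struct zoneref *zoneref)
	{
		return zoneref->zone;
	}

Since the helper is a static inline that simply returns zoneref->zone,
the conversion only changes the spelling at the call sites and generates
the same code, hence no functional change.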