Hello Mel Gorman,

The patch 0f87d9d30f21: "mm/page_alloc: add an array-based interface
to the bulk page allocator" from Apr 29, 2021, leads to the following
static checker warning:

	mm/page_alloc.c:5338 __alloc_pages_bulk()
	warn: potentially one past the end of array 'page_array[nr_populated]'

mm/page_alloc.c
  5225  unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
  5226                          nodemask_t *nodemask, int nr_pages,
  5227                          struct list_head *page_list,
  5228                          struct page **page_array)
  5229  {
  5230          struct page *page;
  5231          unsigned long flags;
  5232          struct zone *zone;
  5233          struct zoneref *z;
  5234          struct per_cpu_pages *pcp;
  5235          struct list_head *pcp_list;
  5236          struct alloc_context ac;
  5237          gfp_t alloc_gfp;
  5238          unsigned int alloc_flags = ALLOC_WMARK_LOW;
  5239          int nr_populated = 0, nr_account = 0;
  5240
  5241          if (unlikely(nr_pages <= 0))
  5242                  return 0;
  5243
  5244          /*
  5245           * Skip populated array elements to determine if any pages need
  5246           * to be allocated before disabling IRQs.
  5247           */
  5248          while (page_array && nr_populated < nr_pages && page_array[nr_populated])
                                     ^^^^^^^^^^^^^^^^^^^^^^^
Presumably we can have all the pages populated.

  5249                  nr_populated++;
  5250
  5251          /* Use the single page allocator for one page. */
  5252          if (nr_pages - nr_populated == 1)
  5253                  goto failed;
  5254
  5255          /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
  5256          gfp &= gfp_allowed_mask;
  5257          alloc_gfp = gfp;
  5258          if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
  5259                  return 0;
  5260          gfp = alloc_gfp;
  5261
  5262          /* Find an allowed local zone that meets the low watermark. */
  5263          for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
  5264                  unsigned long mark;
  5265
  5266                  if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
  5267                      !__cpuset_zone_allowed(zone, gfp)) {
  5268                          continue;
  5269                  }
  5270
  5271                  if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
  5272                      zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
  5273                          goto failed;
  5274                  }
  5275
  5276                  mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
  5277                  if (zone_watermark_fast(zone, 0, mark,
  5278                                  zonelist_zone_idx(ac.preferred_zoneref),
  5279                                  alloc_flags, gfp)) {
  5280                          break;
  5281                  }
  5282          }
  5283
  5284          /*
  5285           * If there are no allowed local zones that meets the watermarks then
  5286           * try to allocate a single page and reclaim if necessary.
  5287           */
  5288          if (unlikely(!zone))
  5289                  goto failed;
                ^^^^^^^^^^^^
If we hit a goto then it puts the new page beyond the end of the array.
  5290
  5291          /* Attempt the batch allocation */
  5292          local_lock_irqsave(&pagesets.lock, flags);
  5293          pcp = this_cpu_ptr(zone->per_cpu_pageset);
  5294          pcp_list = &pcp->lists[ac.migratetype];
  5295
  5296          while (nr_populated < nr_pages) {
  5297
  5298                  /* Skip existing pages */
  5299                  if (page_array && page_array[nr_populated]) {
  5300                          nr_populated++;
  5301                          continue;
  5302                  }
  5303
  5304                  page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
  5305                                                                  pcp, pcp_list);
  5306                  if (unlikely(!page)) {
  5307                          /* Try and get at least one page */
  5308                          if (!nr_populated)
  5309                                  goto failed_irq;
  5310                          break;
  5311                  }
  5312                  nr_account++;
  5313
  5314                  prep_new_page(page, 0, gfp, 0);
  5315                  if (page_list)
  5316                          list_add(&page->lru, page_list);
  5317                  else
  5318                          page_array[nr_populated] = page;
  5319                  nr_populated++;
  5320          }
  5321
  5322          local_unlock_irqrestore(&pagesets.lock, flags);
  5323
  5324          __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
  5325          zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
  5326
  5327          return nr_populated;
  5328
  5329  failed_irq:
  5330          local_unlock_irqrestore(&pagesets.lock, flags);
  5331
  5332  failed:
  5333          page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
  5334          if (page) {
  5335                  if (page_list)
  5336                          list_add(&page->lru, page_list);
  5337                  else
  5338                          page_array[nr_populated] = page;
                                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  5339                  nr_populated++;
  5340          }
  5341
  5342          return nr_populated;
  5343  }

regards,
dan carpenter
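
P.S. One possible way to close the hole would be to bail out right
after the skip loop whenever the caller handed in a fully populated
array, so that none of the "goto failed" paths can reach the
out-of-bounds store. This is only an untested sketch of the idea, not
a tested patch; it reuses the function's own variables:

	/*
	 * Sketch: if every array element was already populated,
	 * nr_populated == nr_pages at this point and any later
	 * "goto failed" would store to page_array[nr_pages], one
	 * element past the end.  Returning early avoids that.
	 */
	if (unlikely(page_array && nr_populated == nr_pages))
		return nr_populated;

With a guard like that, "failed" is only reachable while
nr_populated < nr_pages (the "nr_pages - nr_populated == 1" and
"!nr_populated" paths already guarantee that on their own), so the
store at line 5338 stays in bounds. Callers using page_list instead
of page_array are unaffected, since they never store into the array.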