On Tue, Jan 30, 2024 at 01:20:46PM -0500, Gregory Price wrote:
> +	/* Continue allocating from most recent node and adjust the nr_pages */
> +	node = me->il_prev;
> +	weight = me->il_weight;
> +	if (weight && node_isset(node, nodes)) {
> +		node_pages = min(rem_pages, weight);
> +		nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
> +						  NULL, page_array);
> +		page_array += nr_allocated;
> +		total_allocated += nr_allocated;
> +		/* if that's all the pages, no need to interleave */
> +		if (rem_pages < weight) {
> +			/* stay on current node, adjust il_weight */
> +			me->il_weight -= rem_pages;
> +			return total_allocated;
> +		} else if (rem_pages == weight) {
> +			/* move to next node / weight */
> +			me->il_prev = next_node_in(node, nodes);
> +			me->il_weight = get_il_weight(next_node);

Sigh, I managed to miss a small update that killed next_node in favor of
operating directly on il_prev. Since il_prev has already been advanced to
the next node by that point, the weight lookup needs to go through
me->il_prev rather than the now-stale next_node.

Can you squash this fix into the patch? Otherwise I can submit it as a
separate patch.

~Gregory

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 7cd92f4ec0d7..2c1aef8eab70 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2382,7 +2382,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 	unsigned int weight_total = 0;
 	unsigned long rem_pages = nr_pages;
 	nodemask_t nodes;
-	int nnodes, node, next_node;
+	int nnodes, node;
 	int resume_node = MAX_NUMNODES - 1;
 	u8 resume_weight = 0;
 	int prev_node;
@@ -2412,7 +2412,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
 		} else if (rem_pages == weight) {
 			/* move to next node / weight */
 			me->il_prev = next_node_in(node, nodes);
-			me->il_weight = get_il_weight(next_node);
+			me->il_weight = get_il_weight(me->il_prev);
 			return total_allocated;
 		}
 		/* Otherwise we adjust remaining pages, continue from there */
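
For anyone following the thread, here is a minimal userspace sketch of the
resume logic the fix touches. The array-based node mask, weights table, and
helper names below are simplified, hypothetical stand-ins for the real
nodemask, iw_table, and task state in mm/mempolicy.c, not the kernel
implementation itself:

#include <stdio.h>

#define NR_NODES 4

static const unsigned char weights[NR_NODES] = { 3, 1, 2, 1 };

/* stand-in for next_node_in(): all nodes are set in this toy mask */
static int next_node_in(int node)
{
	return (node + 1) % NR_NODES;
}

/* stand-in for get_il_weight() */
static unsigned char get_il_weight(int node)
{
	return weights[node];
}

int main(void)
{
	int il_prev = 0;			/* node we last allocated from */
	unsigned char il_weight = weights[0];	/* pages still owed to that node */
	unsigned long rem_pages = 3;		/* pages requested in this bulk call */

	if (rem_pages == il_weight) {
		/* current node exactly consumed: advance to the next node */
		il_prev = next_node_in(il_prev);
		/*
		 * il_prev now holds the *next* node, so the weight must be
		 * looked up through il_prev; the buggy version still read a
		 * next_node variable that was no longer being assigned.
		 */
		il_weight = get_il_weight(il_prev);
	}

	printf("resume at node %d with weight %u\n", il_prev, il_weight);
	return 0;
}

Running this prints "resume at node 1 with weight 1": the saved state points
at the next node and its full weight, which is exactly what the one-line fix
in the diff above restores.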