Our servers hit the panic below:

Kernel version: 5.4.56
BUG: unable to handle page fault for address: 0000000000002c48
RIP: 0010:__next_zones_zonelist+0x1d/0x40
[264003.977696] RAX: 0000000000002c40 RBX: 0000000000100dca RCX: 0000000000000014
[264003.977872] Call Trace:
[264003.977888]  __alloc_pages_nodemask+0x277/0x310
[264003.977908]  alloc_page_interleave+0x13/0x70
[264003.977926]  handle_mm_fault+0xf99/0x1390
[264003.977951]  __do_page_fault+0x288/0x500
[264003.977979]  ? schedule+0x39/0xa0
[264003.977994]  do_page_fault+0x30/0x110
[264003.978010]  page_fault+0x3e/0x50

The panic occurs because MAX_NUMNODES is passed as the third parameter
(preferred_nid) of __alloc_pages_nodemask(). Dereferencing
zonelist->zoneref->zone_idx in __next_zones_zonelist() with that
out-of-range node id then causes the page fault.

In offset_il_node(), first_node() returns a nid from pol->v.nodes;
after that, other threads may change pol->v.nodes before next_node()
runs. This race condition lets next_node() return MAX_NUMNODES. Fix it
by copying pol->nodes into a local variable.

The race is between offset_il_node() and cpuset_change_task_nodemask():

CPU0:                                   CPU1:
alloc_pages_vma()
  interleave_nid(pol,)
    offset_il_node(pol,)
      first_node(pol->v.nodes)          cpuset_change_task_nodemask()
            //nodes==0xc                  mpol_rebind_task()
                                            mpol_rebind_policy()
                                              mpol_rebind_nodemask(pol, nodes)
            //nodes==0x3
      next_node(nid, pol->v.nodes)  //returns MAX_NUMNODES

Signed-off-by: yanghui <yanghui.def@xxxxxxxxxxxxx>
---
 mm/mempolicy.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e32360e90274..9c3c168af3e2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -193,7 +193,7 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 {
 	if (nodes_empty(*nodes))
 		return -EINVAL;
-	pol->nodes = *nodes;
+	WRITE_ONCE(pol->nodes, *nodes);
 	return 0;
 }

@@ -211,7 +211,7 @@ static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 {
 	if (nodes_empty(*nodes))
 		return -EINVAL;
-	pol->nodes = *nodes;
+	WRITE_ONCE(pol->nodes, *nodes);
 	return 0;
 }

@@ -334,7 +334,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 	if (nodes_empty(tmp))
 		tmp = *nodes;

-	pol->nodes = tmp;
+	WRITE_ONCE(pol->nodes, tmp);
 }

 static void mpol_rebind_preferred(struct mempolicy *pol,
@@ -1965,7 +1965,8 @@ unsigned int mempolicy_slab_node(void)
  */
 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
 {
-	unsigned nnodes = nodes_weight(pol->nodes);
+	nodemask_t nodemask = READ_ONCE(pol->nodes);
+	unsigned nnodes = nodes_weight(nodemask);
 	unsigned target;
 	int i;
 	int nid;
@@ -1973,9 +1974,9 @@ static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
 	if (!nnodes)
 		return numa_node_id();
 	target = (unsigned int)n % nnodes;
-	nid = first_node(pol->nodes);
+	nid = first_node(nodemask);
 	for (i = 0; i < target; i++)
-		nid = next_node(nid, pol->nodes);
+		nid = next_node(nid, nodemask);
 	return nid;
 }
--
2.20.1
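
For reviewers who want to see the failure mode outside the kernel, here
is a minimal user-space sketch of the bug and of the fix. It is
illustrative only, not kernel code: a 64-bit word stands in for
nodemask_t, mask_first()/mask_next() are hypothetical stand-ins for
first_node()/next_node(), and the concurrent rebind is simulated by an
inline store between the two lookups instead of by a second thread.

/* Hypothetical user-space sketch of the offset_il_node() race. */
#include <stdio.h>
#include <stdint.h>

#define MAX_NUMNODES 64

static uint64_t shared_mask = 0xc;	/* stands in for pol->nodes */

/* like first_node(): lowest set bit, or MAX_NUMNODES if empty */
static int mask_first(uint64_t m)
{
	for (int i = 0; i < MAX_NUMNODES; i++)
		if (m & (1ULL << i))
			return i;
	return MAX_NUMNODES;
}

/* like next_node(): next set bit strictly above nid, or MAX_NUMNODES */
static int mask_next(int nid, uint64_t m)
{
	for (int i = nid + 1; i < MAX_NUMNODES; i++)
		if (m & (1ULL << i))
			return i;
	return MAX_NUMNODES;
}

/* Racy variant: re-reads shared_mask between first and next, like the
 * old offset_il_node(). A "rebind" 0xc -> 0x3 in between leaves no set
 * bit above nid 2, so mask_next() returns MAX_NUMNODES. */
static int offset_racy(unsigned target)
{
	int nid = mask_first(shared_mask);	/* reads 0xc -> nid = 2 */
	shared_mask = 0x3;			/* simulated concurrent rebind */
	for (unsigned i = 0; i < target; i++)
		nid = mask_next(nid, shared_mask);	/* reads 0x3 */
	return nid;
}

/* Fixed variant: snapshot the mask once, as the patch does with
 * READ_ONCE(pol->nodes), so first and next see one consistent mask. */
static int offset_fixed(unsigned target)
{
	uint64_t snap = shared_mask;		/* local copy */
	int nid = mask_first(snap);
	shared_mask = 0x3;			/* rebind no longer matters */
	for (unsigned i = 0; i < target; i++)
		nid = mask_next(nid, snap);
	return nid;
}

int main(void)
{
	shared_mask = 0xc;
	printf("racy:  nid = %d\n", offset_racy(1));	/* 64 == MAX_NUMNODES */
	shared_mask = 0xc;
	printf("fixed: nid = %d\n", offset_fixed(1));	/* 3 */
	return 0;
}

The fixed variant mirrors the patch: one snapshot of the mask, then all
iteration against that snapshot, so a concurrent rebind can no longer
make the next-node walk run off the end of the mask and hand
MAX_NUMNODES to the allocator.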