Hello,

On Thu, Feb 01, 2024 at 06:56:20PM +0100, Helge Deller wrote:
> root@debian:~# drgn --main-symbols -s ./vmlinux ./wq_dump.py 2>&1 | tee L
> Affinity Scopes
> ===============
> wq_unbound_cpumask=0000ffff
>
> CPU
>  nr_pods  16
>  pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008 [4]=00000010 [5]=00000020 [6]=00000040 [7]=00000080 [8]=00000100 [9]=00000200 [10]=00000400 [11]=00000800 [12]=00001000 [13]=00002000 [14]=00004000 [15]=00008000
>  pod_node [0]=0 [1]=0 [2]=0 [3]=0 [4]=0 [5]=0 [6]=0 [7]=0 [8]=0 [9]=0 [10]=0 [11]=0 [12]=0 [13]=0 [14]=0 [15]=0
>  cpu_pod  [0]=0 [1]=1

wq_unbound_cpumask is saying there are 16 possible cpus but
for_each_possible_cpu() iteration is only giving two. Can you please apply
the following patch and post the boot dmesg?

Thanks.

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ffb625db9771..d3fa2bea4d75 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -7146,6 +7146,9 @@ void __init workqueue_init_early(void)
 	BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
 	BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
 
+	printk("XXX workqueue_init_early: possible_cpus=%*pb\n",
+	       cpumask_pr_args(cpu_possible_mask));
+
 	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
 	restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
 	restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
@@ -7290,6 +7293,9 @@ void __init workqueue_init(void)
 	struct worker_pool *pool;
 	int cpu, bkt;
 
+	printk("XXX workqueue_init: possible_cpus=%*pb\n",
+	       cpumask_pr_args(cpu_possible_mask));
+
 	wq_cpu_intensive_thresh_init();
 
 	mutex_lock(&wq_pool_mutex);
@@ -7401,6 +7407,9 @@ void __init workqueue_init_topology(void)
 	struct workqueue_struct *wq;
 	int cpu;
 
+	printk("XXX workqueue_init_topology: possible_cpus=%*pb\n",
+	       cpumask_pr_args(cpu_possible_mask));+
+
 	init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
 	init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
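
For reference, for_each_possible_cpu(cpu) is just for_each_cpu((cpu),
cpu_possible_mask), so the mask the printk()s above report and the set of
CPUs the iteration visits have to agree at any given point in time. A
minimal sketch of that relationship (illustrative only, not part of the
patch; the helper name is made up):

/*
 * Illustrative sketch, not part of the patch above. Count the CPUs
 * that for_each_possible_cpu() visits and compare against the weight
 * of cpu_possible_mask; the two must match, since the iterator walks
 * exactly that mask.
 */
#include <linux/cpumask.h>
#include <linux/printk.h>

static void check_possible_cpus(void)
{
	int cpu, seen = 0;

	for_each_possible_cpu(cpu)
		seen++;

	printk("XXX possible_cpus=%*pb weight=%u visited=%d\n",
	       cpumask_pr_args(cpu_possible_mask),
	       cpumask_weight(cpu_possible_mask), seen);
}

So if the three printk()s in the patch report different masks,
cpu_possible_mask is being modified between the workqueue init stages,
which would explain how a 16-bit wq_unbound_cpumask can coexist with a
two-entry cpu_pod[].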