For a CONFIG_CPUMASK_OFFSTACK=y kernel, explicit allocation of a cpumask
variable on the stack is not recommended since it can cause a potential
stack overflow. Instead, kernel code should always use the *cpumask_var
API(s) to allocate cpumask variables in a config-neutral way, leaving
the allocation strategy to CONFIG_CPUMASK_OFFSTACK.

Use the *cpumask_var API(s) in arm-cmn to address this.

Signed-off-by: Dawei Li <dawei.li@xxxxxxxxxxxx>
---
 drivers/perf/arm-cmn.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 7ef9c7e4836b..7278fd72d3da 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1949,21 +1949,26 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_no
 {
 	struct arm_cmn *cmn;
 	unsigned int target;
+	cpumask_var_t mask;
 	int node;
-	cpumask_t mask;
 
 	cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
 	if (cpu != cmn->cpu)
 		return 0;
 
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return 0;
+
 	node = dev_to_node(cmn->dev);
-	if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
-	    cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
-		target = cpumask_any(&mask);
+	if (cpumask_and(mask, cpumask_of_node(node), cpu_online_mask) &&
+	    cpumask_andnot(mask, mask, cpumask_of(cpu)))
+		target = cpumask_any(mask);
 	else
 		target = cpumask_any_but(cpu_online_mask, cpu);
 	if (target < nr_cpu_ids)
 		arm_cmn_migrate(cmn, target);
+
+	free_cpumask_var(mask);
 	return 0;
 }
-- 
2.27.0
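
For readers less familiar with the API, below is a minimal sketch (not part of
the patch) of the config-neutral pattern the conversion follows: with
CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is an on-stack array and
alloc_cpumask_var() trivially succeeds; with CONFIG_CPUMASK_OFFSTACK=y it
allocates the mask from the heap, so the result must be checked and the mask
freed. The helper name and node-counting logic are hypothetical, for
illustration only.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* Hypothetical helper: count online CPUs on a given NUMA node. */
static int example_count_online_on_node(int node)
{
	cpumask_var_t mask;	/* array or pointer, depending on config */
	int n;

	/* Zero-initialized allocation; a no-op for the on-stack case. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Manipulate the mask only through the cpumask_* helpers. */
	cpumask_and(mask, cpumask_of_node(node), cpu_online_mask);
	n = cpumask_weight(mask);

	free_cpumask_var(mask);
	return n;
}

Note that cpumask_var_t is passed by plain name to the cpumask_* helpers in
both configurations, which is why the patch drops the explicit "&mask"
arguments along with the on-stack cpumask_t.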