For CONFIG_CPUMASK_OFFSTACK=y kernels, explicit allocation of a cpumask
variable on the stack is not recommended since it can cause potential
stack overflow. Instead, kernel code should always use the *cpumask_var
API(s) to allocate cpumask variables in a config-neutral way, leaving
the allocation strategy to CONFIG_CPUMASK_OFFSTACK.

Use the *cpumask_var API(s) to address it.

Signed-off-by: Dawei Li <dawei.li@xxxxxxxxxxxx>
---
 drivers/perf/arm_cspmu/arm_cspmu.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index b9a252272f1e..8fa7c26aec28 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -1322,8 +1322,8 @@ static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
 
 static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
 {
+	cpumask_var_t online_supported;
 	int dst;
-	struct cpumask online_supported;
 
 	struct arm_cspmu *cspmu =
 		hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
@@ -1332,17 +1332,22 @@ static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
 	if (!cpumask_test_and_clear_cpu(cpu, &cspmu->active_cpu))
 		return 0;
 
+	if (!alloc_cpumask_var(&online_supported, GFP_KERNEL))
+		return 0;
+
 	/* Choose a new CPU to migrate ownership of the PMU to */
-	cpumask_and(&online_supported, &cspmu->associated_cpus,
+	cpumask_and(online_supported, &cspmu->associated_cpus,
 		    cpu_online_mask);
-	dst = cpumask_any_but(&online_supported, cpu);
+	dst = cpumask_any_but(online_supported, cpu);
 	if (dst >= nr_cpu_ids)
-		return 0;
+		goto __free_cpumask;
 
 	/* Use this CPU for event counting */
 	perf_pmu_migrate_context(&cspmu->pmu, cpu, dst);
 	arm_cspmu_set_active_cpu(dst, cspmu);
 
+__free_cpumask:
+	free_cpumask_var(online_supported);
 	return 0;
 }
-- 
2.27.0