For a CONFIG_CPUMASK_OFFSTACK=y kernel, explicitly allocating a cpumask
variable on the stack is not recommended, since it can cause a potential
stack overflow.

Instead, kernel code should always use the *cpumask_var API(s) to allocate
cpumask variables in a config-neutral way, leaving the allocation strategy
to CONFIG_CPUMASK_OFFSTACK.

Use the *cpumask_var API(s) to address this.

Signed-off-by: Dawei Li <dawei.li@xxxxxxxxxxxx>
---
 drivers/perf/alibaba_uncore_drw_pmu.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index a9277dcf90ce..251f0a2dee84 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -743,25 +743,28 @@ static void ali_drw_pmu_remove(struct platform_device *pdev)
 
 static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 {
+	cpumask_var_t node_online_cpus;
 	struct ali_drw_pmu_irq *irq;
 	struct ali_drw_pmu *drw_pmu;
 	unsigned int target;
 	int ret;
-	cpumask_t node_online_cpus;
 
 	irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
 	if (cpu != irq->cpu)
 		return 0;
 
-	ret = cpumask_and(&node_online_cpus,
+	if (!alloc_cpumask_var(&node_online_cpus, GFP_KERNEL))
+		return 0;
+
+	ret = cpumask_and(node_online_cpus,
 			  cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
 	if (ret)
-		target = cpumask_any_but(&node_online_cpus, cpu);
+		target = cpumask_any_but(node_online_cpus, cpu);
 	else
 		target = cpumask_any_but(cpu_online_mask, cpu);
 
 	if (target >= nr_cpu_ids)
-		return 0;
+		goto __free_cpumask;
 
 	/* We're only reading, but this isn't the place to be involving RCU */
 	mutex_lock(&ali_drw_pmu_irqs_lock);
@@ -772,6 +775,8 @@ static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
 	WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
 	irq->cpu = target;
 
+__free_cpumask:
+	free_cpumask_var(node_online_cpus);
 	return 0;
 }
 
-- 
2.27.0
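
For reference, the config-neutral pattern being adopted looks roughly like
the sketch below. This is illustrative only; example_pick_target() is a
hypothetical helper, not part of the driver. With CONFIG_CPUMASK_OFFSTACK=y,
cpumask_var_t is a pointer and alloc_cpumask_var() allocates the mask from
the heap; with it disabled, the mask is embedded in the variable itself and
allocation always succeeds.

/*
 * Illustrative sketch only (not part of the patch): pick an online CPU on
 * the same NUMA node as @cpu, using the config-neutral cpumask_var_t
 * helpers instead of an on-stack cpumask_t.
 */
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_pick_target(unsigned int cpu)
{
	cpumask_var_t mask;
	unsigned int target;

	/* Heap allocation with CONFIG_CPUMASK_OFFSTACK=y, trivial otherwise. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Prefer an online CPU on the same node, excluding @cpu itself. */
	if (cpumask_and(mask, cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask))
		target = cpumask_any_but(mask, cpu);
	else
		target = cpumask_any_but(cpu_online_mask, cpu);

	free_cpumask_var(mask);

	return target < nr_cpu_ids ? (int)target : -ENODEV;
}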