On Fri, Jul 19, 2024 at 06:06:12PM -0000, tip-bot2 for Anna-Maria Behnsen wrote:
> @@ -1661,80 +1728,39 @@ static int tmigr_add_cpu(unsigned int cpu)
>  	return ret;
>  }
>  
> -static int tmigr_cpu_online(unsigned int cpu)
> -{
> -	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
> -	int ret;
> -
> -	/* First online attempt? Initialize CPU data */
> -	if (!tmc->tmgroup) {
> -		raw_spin_lock_init(&tmc->lock);
> -
> -		ret = tmigr_add_cpu(cpu);
> -		if (ret < 0)
> -			return ret;
> -
> -		if (tmc->childmask == 0)
> -			return -EINVAL;
> -
> -		timerqueue_init(&tmc->cpuevt.nextevt);
> -		tmc->cpuevt.nextevt.expires = KTIME_MAX;
> -		tmc->cpuevt.ignore = true;
> -		tmc->cpuevt.cpu = cpu;
> -
> -		tmc->remote = false;
> -		WRITE_ONCE(tmc->wakeup, KTIME_MAX);
> -	}
> -	raw_spin_lock_irq(&tmc->lock);
> -	trace_tmigr_cpu_online(tmc);
> -	tmc->idle = timer_base_is_idle();
> -	if (!tmc->idle)
> -		__tmigr_cpu_activate(tmc);
> -	tmc->online = true;
> -	raw_spin_unlock_irq(&tmc->lock);
> -	return 0;
> -}
> -
> -/*
> - * tmigr_trigger_active() - trigger a CPU to become active again
> - *
> - * This function is executed on a CPU which is part of cpu_online_mask, when the
> - * last active CPU in the hierarchy is offlining. With this, it is ensured that
> - * the other CPU is active and takes over the migrator duty.
> - */
> -static long tmigr_trigger_active(void *unused)
> +static int tmigr_cpu_prepare(unsigned int cpu)
>  {
> -	struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
> -
> -	WARN_ON_ONCE(!tmc->online || tmc->idle);
> +	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
> +	int ret = 0;
>  
> -	return 0;
> -}
> +	/*
> +	 * The target CPU must never do the prepare work. Otherwise it may
> +	 * spuriously activate the old top level group inside the new one
> +	 * (nevertheless whether old top level group is active or not) and/or
> +	 * release an uninitialized childmask.
> +	 */
> +	WARN_ON_ONCE(cpu == raw_smp_processor_id());

Silly me! Who else than the boot CPU can do the prepare work for the
boot CPU?

This should be something like:

diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index fae04950487f..8d57f7686bb0 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -1673,6 +1673,15 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 
 	lvllist = &tmigr_level_list[top];
 	if (group->num_children == 1 && list_is_singular(lvllist)) {
+		/*
+		 * The target CPU must never do the prepare work, except
+		 * on early boot when the boot CPU is the target. Otherwise
+		 * it may spuriously activate the old top level group inside
+		 * the new one (nevertheless whether old top level group is
+		 * active or not) and/or release an uninitialized childmask.
+		 */
+		WARN_ON_ONCE(cpu == raw_smp_processor_id());
+
 		lvllist = &tmigr_level_list[top - 1];
 		list_for_each_entry(child, lvllist, list) {
 			if (child->parent)
@@ -1705,14 +1714,6 @@ static int tmigr_cpu_prepare(unsigned int cpu)
 	struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
 	int ret = 0;
 
-	/*
-	 * The target CPU must never do the prepare work. Otherwise it may
-	 * spuriously activate the old top level group inside the new one
-	 * (nevertheless whether old top level group is active or not) and/or
-	 * release an uninitialized childmask.
-	 */
-	WARN_ON_ONCE(cpu == raw_smp_processor_id());
-
 	/* Not first online attempt? */
 	if (tmc->tmgroup)
 		return ret;
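
To spell out why the unconditional check cannot survive early boot, here is
a minimal sketch of the registration path (my reconstruction, not the actual
patch; the CPUHP_TMIGR_PREPARE state name and the exact shape of
tmigr_init() are assumptions):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int __init tmigr_init(void)
{
	/*
	 * For a state in the PREPARE section, cpuhp_setup_state() invokes
	 * the startup callback from the caller's context for every CPU
	 * that is already online. At early_initcall() time the only online
	 * CPU is the boot CPU, and the caller *is* the boot CPU, so
	 * tmigr_cpu_prepare() inevitably runs with
	 * cpu == raw_smp_processor_id() and an unconditional
	 * WARN_ON_ONCE() there must trigger.
	 */
	return cpuhp_setup_state(CPUHP_TMIGR_PREPARE, "tmigr:prepare",
				 tmigr_cpu_prepare, NULL);
}
early_initcall(tmigr_init);

For CPUs hotplugged later, prepare callbacks run on a control CPU rather
than the target, which is what makes the check valid everywhere except this
one boot-time invocation; hence moving it into the new-top-level path of
tmigr_setup_groups(), which the boot CPU never takes on its own behalf.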