On Thu, Dec 09, 2021 at 03:09:35PM +0000, David Woodhouse wrote:
> diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
> index 50a4515fe0ad..4ee247d89a49 100644
> --- a/arch/x86/kernel/tsc_sync.c
> +++ b/arch/x86/kernel/tsc_sync.c
> @@ -202,6 +202,7 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
>   * Entry/exit counters that make sure that both CPUs
>   * run the measurement code at once:
>   */
> +static atomic_t tsc_sync_cpu = ATOMIC_INIT(-1);
>  static atomic_t start_count;
>  static atomic_t stop_count;
>  static atomic_t skip_test;
> @@ -326,6 +327,8 @@ void check_tsc_sync_source(int cpu)
>  		atomic_set(&test_runs, 1);
>  	else
>  		atomic_set(&test_runs, 3);
> +
> +	atomic_set(&tsc_sync_cpu, cpu);
>  retry:
>  	/*
>  	 * Wait for the target to start or to skip the test:
> @@ -407,6 +410,10 @@ void check_tsc_sync_target(void)
>  	if (unsynchronized_tsc())
>  		return;
> 
> +	/* Wait for this CPU's turn */
> +	while (atomic_read(&tsc_sync_cpu) != cpu)
> +		cpu_relax();
> +
>  	/*
>  	 * Store, verify and sanitize the TSC adjust register. If
>  	 * successful skip the test.

This new atomic_t seems superfluous; there isn't any actual atomic
operation used on it, only atomic_set() and atomic_read().
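
FWIW the same handover could be expressed with a plain int. A minimal,
untested sketch of what I mean (READ_ONCE()/WRITE_ONCE() come from
<linux/compiler.h>, cpu_relax() from <asm/processor.h>):

	static int tsc_sync_cpu = -1;

	/* source side (check_tsc_sync_source()): hand the test to @cpu */
	WRITE_ONCE(tsc_sync_cpu, cpu);

	/* target side (check_tsc_sync_target()): spin until it's our turn */
	while (READ_ONCE(tsc_sync_cpu) != cpu)
		cpu_relax();

atomic_set()/atomic_read() have relaxed semantics anyway, so this
doesn't weaken any ordering the patch could have been relying on.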