Although the kernel switches over to the stable TSC clocksource instead
of the PV clocksource, the scheduler still keeps using the PV clock as
the sched clock source. This is because KVM, Xen and VMware switch the
paravirt sched clock handler in their init routines. Hyper-V is the
only PV clock source that checks whether the platform provides an
invariant TSC and does not switch to the PV sched clock in that case.

When switching back to the stable TSC, restore the scheduler clock to
native_sched_clock(). As the clocksource selection happens in stop
machine context, schedule delayed work to update the static_call().

Cc: Alexey Makhalov <alexey.makhalov@xxxxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
---
 arch/x86/kernel/tsc.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index cf29ede4ee80..d8f4844244f4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -272,10 +272,25 @@ bool using_native_sched_clock(void)
 {
 	return static_call_query(pv_sched_clock) == native_sched_clock;
 }
+
+static void enable_native_sc_work(struct work_struct *work)
+{
+	pr_info("using native sched clock\n");
+	paravirt_set_sched_clock(native_sched_clock);
+}
+static DECLARE_DELAYED_WORK(enable_native_sc, enable_native_sc_work);
+
+static void enable_native_sched_clock(void)
+{
+	if (!using_native_sched_clock())
+		schedule_delayed_work(&enable_native_sc, 0);
+}
 #else
 u64 sched_clock_noinstr(void) __attribute__((alias("native_sched_clock")));
 
 bool using_native_sched_clock(void) { return true; }
+
+static void enable_native_sched_clock(void) { }
 #endif
 
 notrace u64 sched_clock(void)
@@ -1157,6 +1172,10 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
 static int tsc_cs_enable(struct clocksource *cs)
 {
 	vclocks_set_used(VDSO_CLOCKMODE_TSC);
+
+	/* Restore native_sched_clock() when switching to TSC */
+	enable_native_sched_clock();
+
 	return 0;
 }
 
-- 
2.34.1
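
[Not part of the patch: a minimal, hypothetical sketch of why the restore
above is needed. PV guest init code retargets the pv_sched_clock
static_call via paravirt_set_sched_clock(); my_pv_clock_read(),
my_read_pvclock_ns() and my_pv_guest_init() below are illustrative
stand-ins for what kvmclock, Xen and VMware do in their init routines.]

#include <asm/paravirt.h>

/* Hypothetical PV clock read function returning nanoseconds. */
static u64 notrace my_pv_clock_read(void)
{
	return my_read_pvclock_ns();
}

static void __init my_pv_guest_init(void)
{
	/*
	 * Retarget the pv_sched_clock static_call away from
	 * native_sched_clock(); the patch above undoes this once the
	 * clocksource switches back to the stable TSC.
	 */
	paravirt_set_sched_clock(my_pv_clock_read);
}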