On 12/16/2024 10:01 PM, Tom Lendacky wrote:
> On 12/3/24 03:00, Nikunj A Dadhania wrote:
>> Calibrating the TSC frequency using the kvmclock is not correct for
>> SecureTSC enabled guests. Use the platform provided TSC frequency via the
>> GUEST_TSC_FREQ MSR (C001_0134h).
>>
>> Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
>> ---
>>  arch/x86/include/asm/sev.h |  2 ++
>>  arch/x86/coco/sev/core.c   | 16 ++++++++++++++++
>>  arch/x86/kernel/tsc.c      |  5 +++++
>>  3 files changed, 23 insertions(+)
>>
>> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
>> index 9fd02efef08e..c4dca06b3b01 100644
>> --- a/arch/x86/include/asm/sev.h
>> +++ b/arch/x86/include/asm/sev.h
>> @@ -493,6 +493,7 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
>>  			   struct snp_guest_request_ioctl *rio);
>>  
>>  void __init snp_secure_tsc_prepare(void);
>> +void __init snp_secure_tsc_init(void);
>>  
>>  #else	/* !CONFIG_AMD_MEM_ENCRYPT */
>>  
>> @@ -536,6 +537,7 @@ static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
>>  static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
>>  					 struct snp_guest_request_ioctl *rio) { return -ENODEV; }
>>  static inline void __init snp_secure_tsc_prepare(void) { }
>> +static inline void __init snp_secure_tsc_init(void) { }
>>  
>>  #endif	/* CONFIG_AMD_MEM_ENCRYPT */
>>  
>> diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
>> index 59c5e716fdd1..1bc668883058 100644
>> --- a/arch/x86/coco/sev/core.c
>> +++ b/arch/x86/coco/sev/core.c
>> @@ -3279,3 +3279,19 @@ void __init snp_secure_tsc_prepare(void)
>>  
>>  	pr_debug("SecureTSC enabled");
>>  }
>> +
>> +static unsigned long securetsc_get_tsc_khz(void)
>> +{
>> +	unsigned long long tsc_freq_mhz;
>> +
>> +	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
>> +	rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
> 
> This should never change, right? Can this be put in snp_secure_tsc_init()
> and just return a saved value that is already in khz form? No reason to
> perform the MSR access and multiplication every time.

This only happens a couple of times during boot, so I don't think it adds
much overhead.

Something like below?

diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c
index c7870294a957..69b65c4c850c 100644
--- a/arch/x86/coco/sev/core.c
+++ b/arch/x86/coco/sev/core.c
@@ -103,6 +103,7 @@ static u64 secrets_pa __ro_after_init;
  */
 static u64 snp_tsc_scale __ro_after_init;
 static u64 snp_tsc_offset __ro_after_init;
+static u64 snp_tsc_freq_khz __ro_after_init;
 
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
@@ -3282,16 +3283,18 @@ void __init snp_secure_tsc_prepare(void)
 
 static unsigned long securetsc_get_tsc_khz(void)
 {
-	unsigned long long tsc_freq_mhz;
-
 	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
-	rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
 
-	return (unsigned long)(tsc_freq_mhz * 1000);
+	return snp_tsc_freq_khz;
 }
 
 void __init snp_secure_tsc_init(void)
 {
+	unsigned long long tsc_freq_mhz;
+
+	rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
+	snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
+
 	x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
 	x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
 }

---
Regards
Nikunj