On 10/31/2023 1:56 AM, Tom Lendacky wrote:
> On 10/30/23 01:36, Nikunj A Dadhania wrote:
>> Add support for Secure TSC in SNP enabled guests. Secure TSC allows
>> guest to securely use RDTSC/RDTSCP instructions as the parameters
>> being used cannot be changed by hypervisor once the guest is launched.
>>
>> During the boot-up of the secondary cpus, SecureTSC enabled guests
>> need to query TSC info from AMD Security Processor. This communication
>> channel is encrypted between the AMD Security Processor and the guest,
>> the hypervisor is just the conduit to deliver the guest messages to
>> the AMD Security Processor. Each message is protected with an
>> AEAD (AES-256 GCM). Use minimal AES GCM library to encrypt/decrypt SNP
>> Guest messages to communicate with the PSP.
>
> Add to this commit message that you're using the enc_init hook to
> perform some Secure TSC initialization and why you have to do that.

Sure, will add.

>>
>> Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
>> ---
>>  arch/x86/coco/core.c             |  3 ++
>>  arch/x86/include/asm/sev-guest.h | 18 +++++++
>>  arch/x86/include/asm/sev.h       |  2 +
>>  arch/x86/include/asm/svm.h       |  6 ++-
>>  arch/x86/kernel/sev.c            | 82 ++++++++++++++++++++++++++++++++
>>  arch/x86/mm/mem_encrypt_amd.c    |  6 +++
>>  include/linux/cc_platform.h      |  8 ++++
>>  7 files changed, 123 insertions(+), 2 deletions(-)
>>
>> diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
>> index eeec9986570e..5d5d4d03c543 100644
>> --- a/arch/x86/coco/core.c
>> +++ b/arch/x86/coco/core.c
>> @@ -89,6 +89,9 @@ static bool noinstr amd_cc_platform_has(enum cc_attr attr)
>>  	case CC_ATTR_GUEST_SEV_SNP:
>>  		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
>> +	case CC_ATTR_GUEST_SECURE_TSC:
>> +		return sev_status & MSR_AMD64_SNP_SECURE_TSC;
>> +
>>  	default:
>>  		return false;
>>  	}
>> diff --git a/arch/x86/include/asm/sev-guest.h b/arch/x86/include/asm/sev-guest.h
>> index e6f94208173d..58739173eba9 100644
>> --- a/arch/x86/include/asm/sev-guest.h
>> +++ b/arch/x86/include/asm/sev-guest.h
>> @@ -39,6 +39,8 @@ enum msg_type {
>>  	SNP_MSG_ABSORB_RSP,
>>  	SNP_MSG_VMRK_REQ,
>>  	SNP_MSG_VMRK_RSP,
>> +	SNP_MSG_TSC_INFO_REQ = 17,
>> +	SNP_MSG_TSC_INFO_RSP,
>>  	SNP_MSG_TYPE_MAX
>>  };
>> @@ -111,6 +113,22 @@ struct snp_guest_req {
>>  	u8 msg_type;
>>  };
>> +struct snp_tsc_info_req {
>> +#define SNP_TSC_INFO_REQ_SZ 128
>
> Please move this to before the struct definition.
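
For illustration, with the define hoisted out of the struct this would
become:

#define SNP_TSC_INFO_REQ_SZ	128

struct snp_tsc_info_req {
	/* Must be zero filled */
	u8 rsvd[SNP_TSC_INFO_REQ_SZ];
} __packed;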
>
>> +	/* Must be zero filled */
>> +	u8 rsvd[SNP_TSC_INFO_REQ_SZ];
>> +} __packed;
>> +
>> +struct snp_tsc_info_resp {
>> +	/* Status of TSC_INFO message */
>> +	u32 status;
>> +	u32 rsvd1;
>> +	u64 tsc_scale;
>> +	u64 tsc_offset;
>> +	u32 tsc_factor;
>> +	u8 rsvd2[100];
>> +} __packed;
>> +
>>  int snp_setup_psp_messaging(struct snp_guest_dev *snp_dev);
>>  int snp_send_guest_request(struct snp_guest_dev *dev, struct snp_guest_req *req,
>>  			   struct snp_guest_request_ioctl *rio);
>> diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
>> index 783150458864..038a5a15d937 100644
>> --- a/arch/x86/include/asm/sev.h
>> +++ b/arch/x86/include/asm/sev.h
>> @@ -200,6 +200,7 @@ void __init __noreturn snp_abort(void);
>>  void snp_accept_memory(phys_addr_t start, phys_addr_t end);
>>  u64 snp_get_unsupported_features(u64 status);
>>  u64 sev_get_status(void);
>> +void __init snp_secure_tsc_prepare(void);
>>  #else
>>  static inline void sev_es_ist_enter(struct pt_regs *regs) { }
>>  static inline void sev_es_ist_exit(void) { }
>> @@ -223,6 +224,7 @@ static inline void snp_abort(void) { }
>>  static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
>>  static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
>>  static inline u64 sev_get_status(void) { return 0; }
>> +static inline void __init snp_secure_tsc_prepare(void) { }
>>  #endif
>>  #endif
>> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
>> index 3ac0ffc4f3e2..ee35c0488f56 100644
>> --- a/arch/x86/include/asm/svm.h
>> +++ b/arch/x86/include/asm/svm.h
>> @@ -414,7 +414,9 @@ struct sev_es_save_area {
>>  	u8 reserved_0x298[80];
>>  	u32 pkru;
>>  	u32 tsc_aux;
>> -	u8 reserved_0x2f0[24];
>> +	u64 tsc_scale;
>> +	u64 tsc_offset;
>> +	u8 reserved_0x300[8];
>>  	u64 rcx;
>>  	u64 rdx;
>>  	u64 rbx;
>> @@ -546,7 +548,7 @@ static inline void __unused_size_checks(void)
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
>> -	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
>> +	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
>>  	BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>> index fb3b1feb1b84..9468809d02c7 100644
>> --- a/arch/x86/kernel/sev.c
>> +++ b/arch/x86/kernel/sev.c
>> @@ -76,6 +76,10 @@ static u64 sev_hv_features __ro_after_init;
>>  /* Secrets page physical address from the CC blob */
>>  static u64 secrets_pa __ro_after_init;
>> +/* Secure TSC values read using TSC_INFO SNP Guest request */
>> +static u64 guest_tsc_scale __ro_after_init;
>> +static u64 guest_tsc_offset __ro_after_init;
>
> s/guest_/snp_/
>
>> +
>>  /* #VC handler runtime per-CPU data */
>>  struct sev_es_runtime_data {
>>  	struct ghcb ghcb_page;
>> @@ -1393,6 +1397,78 @@ bool snp_assign_vmpck(struct snp_guest_dev *dev, unsigned int vmpck_id)
>>  }
>>  EXPORT_SYMBOL_GPL(snp_assign_vmpck);
>> +static struct snp_guest_dev tsc_snp_dev __initdata;
>> +
>> +static int __init snp_get_tsc_info(void)
>> +{
>> +	static u8 buf[SNP_TSC_INFO_REQ_SZ + AUTHTAG_LEN];
>> +	struct snp_guest_request_ioctl rio;
>> +	struct snp_tsc_info_resp tsc_resp;
>> +	struct snp_tsc_info_req tsc_req;
>> +	struct snp_guest_req req;
>> +	int rc, resp_len;
>> +
>> +	/*
>> +	 * The intermediate response buffer is used while decrypting the
>> +	 * response payload. Make sure that it has enough space to cover the
>> +	 * authtag.
>> +	 */
>> +	resp_len = sizeof(tsc_resp) + AUTHTAG_LEN;
>> +	if (sizeof(buf) < resp_len)
>> +		return -EINVAL;
>> +
>> +	memset(&tsc_req, 0, sizeof(tsc_req));
>> +	memset(&req, 0, sizeof(req));
>> +	memset(&rio, 0, sizeof(rio));
>> +	memset(buf, 0, sizeof(buf));
>> +
>> +	if (!snp_assign_vmpck(&tsc_snp_dev, 0))
>> +		return -EINVAL;
>> +
>> +	/* Initialize the PSP channel to send snp messages */
>> +	if (snp_setup_psp_messaging(&tsc_snp_dev))
>> +		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
>
> This should just return the non-zero return code from
> snp_setup_psp_messaging(), no?
>
> 	rc = snp_setup_psp_messaging(&tsc_snp_dev);
> 	if (rc)
> 		return rc;

Yes, that will also have the same behaviour: snp_get_tsc_info() will return
the error and the termination request will still be sent (see the sketch
below).

>> +
>> +	req.msg_version = MSG_HDR_VER;
>> +	req.msg_type = SNP_MSG_TSC_INFO_REQ;
>> +	req.vmpck_id = tsc_snp_dev.vmpck_id;
>> +	req.req_buf = &tsc_req;
>> +	req.req_sz = sizeof(tsc_req);
>> +	req.resp_buf = buf;
>> +	req.resp_sz = resp_len;
>> +	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
>> +	rc = snp_send_guest_request(&tsc_snp_dev, &req, &rio);
>
> Aren't you supposed to hold a mutex before calling this since it will
> eventually call the message sequence number functions?

Yes, I will need to, otherwise lockdep will complain. This is being called
from the boot processor, so there is no parallel execution.

>> +	if (rc)
>> +		goto err_req;
>> +
>> +	memcpy(&tsc_resp, buf, sizeof(tsc_resp));
>> +	pr_debug("%s: Valid response status %x scale %llx offset %llx factor %x\n",
>> +		 __func__, tsc_resp.status, tsc_resp.tsc_scale, tsc_resp.tsc_offset,
>> +		 tsc_resp.tsc_factor);
>> +
>> +	guest_tsc_scale = tsc_resp.tsc_scale;
>> +	guest_tsc_offset = tsc_resp.tsc_offset;
>> +
>> +err_req:
>> +	/* The response buffer contains the sensitive data, explicitly clear it. */
>> +	memzero_explicit(buf, sizeof(buf));
>> +	memzero_explicit(&tsc_resp, sizeof(tsc_resp));
>> +	memzero_explicit(&req, sizeof(req));
>> +
>> +	return rc;
>> +}
>> +
>> +void __init snp_secure_tsc_prepare(void)
>> +{
>> +	if (!cc_platform_has(CC_ATTR_GUEST_SECURE_TSC))
>> +		return;
>> +
>> +	if (snp_get_tsc_info())
>> +		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
>
> How about using SEV_TERM_SET_LINUX and a new GHCB_TERM_SECURE_TSC_INFO.

Yes, we can do that. I remember you had said this would require a GHCB spec
change, and so I thought of sticking with the current return code.
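
Putting these together (returning the error from snp_setup_psp_messaging(),
serializing the guest request with a mutex, and the new termination reason),
a rough sketch; the GHCB_TERM_SECURE_TSC_INFO value and the
snp_guest_cmd_mutex name are placeholders here:

/* Linux-specific termination reason code, placeholder value */
#define GHCB_TERM_SECURE_TSC_INFO	9

static int __init snp_get_tsc_info(void)
{
	/* ... declarations and buffer/vmpck setup as above ... */

	/* Propagate the error instead of terminating here */
	rc = snp_setup_psp_messaging(&tsc_snp_dev);
	if (rc)
		return rc;

	/* ... fill in req as above ... */

	/* Serialize the request as it advances the message sequence number */
	mutex_lock(&snp_guest_cmd_mutex);
	rc = snp_send_guest_request(&tsc_snp_dev, &req, &rio);
	mutex_unlock(&snp_guest_cmd_mutex);
	if (rc)
		goto err_req;

	/* ... copy out tsc_scale/tsc_offset as above ... */

err_req:
	/* ... clear the buffers as above ... */
	return rc;
}

void __init snp_secure_tsc_prepare(void)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SECURE_TSC))
		return;

	/* Linux reason code instead of GHCB_SNP_UNSUPPORTED */
	if (snp_get_tsc_info())
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_SECURE_TSC_INFO);

	pr_debug("SecureTSC enabled\n");
}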
>
>> +
>> +	pr_debug("SecureTSC enabled\n");
>> +}
>> +
>>  static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
>>  {
>>  	struct sev_es_save_area *cur_vmsa, *vmsa;
>> @@ -1493,6 +1569,12 @@ static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
>>  	vmsa->vmpl = 0;
>>  	vmsa->sev_features = sev_status >> 2;
>> +	/* Setting Secure TSC parameters */
>> +	if (cc_platform_has(CC_ATTR_GUEST_SECURE_TSC)) {
>> +		vmsa->tsc_scale = guest_tsc_scale;
>> +		vmsa->tsc_offset = guest_tsc_offset;
>> +	}
>> +
>>  	/* Switch the page over to a VMSA page now that it is initialized */
>>  	ret = snp_set_vmsa(vmsa, true);
>>  	if (ret) {
>> diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
>> index 6faea41e99b6..9935fc506e99 100644
>> --- a/arch/x86/mm/mem_encrypt_amd.c
>> +++ b/arch/x86/mm/mem_encrypt_amd.c
>> @@ -215,6 +215,11 @@ void __init sme_map_bootdata(char *real_mode_data)
>>  	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
>>  }
>> +void __init amd_enc_init(void)
>> +{
>> +	snp_secure_tsc_prepare();
>> +}
>> +
>>  void __init sev_setup_arch(void)
>>  {
>>  	phys_addr_t total_mem = memblock_phys_mem_size();
>> @@ -502,6 +507,7 @@ void __init sme_early_init(void)
>>  	x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
>>  	x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
>>  	x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
>> +	x86_platform.guest.enc_init = amd_enc_init;
>>  	/*
>>  	 * AMD-SEV-ES intercepts the RDMSR to read the X2APIC ID in the

Regards
Nikunj