On 20.02.23 19:38, Michael Roth wrote:
From: Brijesh Singh <brijesh.singh@xxxxxxx>

Version 2 of the GHCB specification added support for two SNP Guest Request
Message NAE events. These events allow an SEV-SNP guest to make requests to
the SEV-SNP firmware through the hypervisor using the SNP_GUEST_REQUEST API
defined in the SEV-SNP firmware specification.

The SNP_EXT_GUEST_REQUEST is similar to SNP_GUEST_REQUEST, with the difference
of an additional certificate blob that can be passed through the
SNP_SET_CONFIG ioctl defined in the CCP driver. The CCP driver provides
snp_guest_ext_guest_request() that is used by KVM to get both the report and
certificate data at once.

Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 arch/x86/kvm/svm/sev.c | 185 +++++++++++++++++++++++++++++++++++++++--
 arch/x86/kvm/svm/svm.h |   2 +
 2 files changed, 181 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 197b1f904567..92179614102e 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -327,6 +327,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		if (ret)
 			goto e_free;
 
+		mutex_init(&sev->guest_req_lock);
 		ret = sev_snp_init(&argp->error, false);
 	} else {
 		ret = sev_platform_init(&argp->error);
@@ -2059,23 +2060,34 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
  */
 static void *snp_context_create(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	struct sev_data_snp_addr data = {};
-	void *context;
+	void *context, *certs_data;
 	int rc;
 
+	/* Allocate memory used for the certs data in SNP guest request */
+	certs_data = kzalloc(SEV_FW_BLOB_MAX_SIZE, GFP_KERNEL_ACCOUNT);
+	if (!certs_data)
+		return NULL;
+
 	/* Allocate memory for context page */
 	context = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
 	if (!context)
-		return NULL;
+		goto e_free;
 
 	data.gctx_paddr = __psp_pa(context);
 	rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_GCTX_CREATE, &data, &argp->error);
-	if (rc) {
-		snp_free_firmware_page(context);
-		return NULL;
-	}
+	if (rc)
+		goto e_free;
+
+	sev->snp_certs_data = certs_data;
 
 	return context;
+
+e_free:
+	snp_free_firmware_page(context);
+	kfree(certs_data);
+	return NULL;
 }
 
 static int snp_bind_asid(struct kvm *kvm, int *error)
@@ -2693,6 +2705,8 @@ static int snp_decommission_context(struct kvm *kvm)
 	snp_free_firmware_page(sev->snp_context);
 	sev->snp_context = NULL;
 
+	kfree(sev->snp_certs_data);
+
 	return 0;
 }
 
@@ -3153,6 +3167,8 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
 	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
 	case SVM_VMGEXIT_HV_FEATURES:
 	case SVM_VMGEXIT_PSC:
+	case SVM_VMGEXIT_GUEST_REQUEST:
+	case SVM_VMGEXIT_EXT_GUEST_REQUEST:
 		break;
 	default:
 		reason = GHCB_ERR_INVALID_EVENT;
@@ -3384,6 +3400,149 @@ static int snp_complete_psc(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static unsigned long snp_setup_guest_buf(struct vcpu_svm *svm,
+					 struct sev_data_snp_guest_request *data,
+					 gpa_t req_gpa, gpa_t resp_gpa)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct kvm *kvm = vcpu->kvm;
+	kvm_pfn_t req_pfn, resp_pfn;
+	struct kvm_sev_info *sev;
+
+	sev = &to_kvm_svm(kvm)->sev_info;
+
+	if (!IS_ALIGNED(req_gpa, PAGE_SIZE) || !IS_ALIGNED(resp_gpa, PAGE_SIZE))
+		return SEV_RET_INVALID_PARAM;
+
+	req_pfn = gfn_to_pfn(kvm, gpa_to_gfn(req_gpa));
+	if (is_error_noslot_pfn(req_pfn))
+		return SEV_RET_INVALID_ADDRESS;
+
+	resp_pfn = gfn_to_pfn(kvm, gpa_to_gfn(resp_gpa));
+	if (is_error_noslot_pfn(resp_pfn))
+		return SEV_RET_INVALID_ADDRESS;
+
+	if (rmp_make_private(resp_pfn, 0, PG_LEVEL_4K, 0, true))
+		return SEV_RET_INVALID_ADDRESS;
+
+	data->gctx_paddr = __psp_pa(sev->snp_context);
+	data->req_paddr = __sme_set(req_pfn << PAGE_SHIFT);
+	data->res_paddr = __sme_set(resp_pfn << PAGE_SHIFT);
+
+	return 0;
+}
+
+static void snp_cleanup_guest_buf(struct sev_data_snp_guest_request *data, unsigned long *rc)
+{
+	u64 pfn = __sme_clr(data->res_paddr) >> PAGE_SHIFT;
+	int ret;
+
+	ret = snp_page_reclaim(pfn);
+	if (ret)
+		*rc = SEV_RET_INVALID_ADDRESS;
+
+	ret = rmp_make_shared(pfn, PG_LEVEL_4K);
+	if (ret)
+		*rc = SEV_RET_INVALID_ADDRESS;
+}
+
+static void snp_handle_guest_request(struct vcpu_svm *svm, gpa_t req_gpa, gpa_t resp_gpa)
+{
+	struct sev_data_snp_guest_request data = {0};
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sev_info *sev;
+	unsigned long rc;
+	int err;
+
+	if (!sev_snp_guest(vcpu->kvm)) {
+		rc = SEV_RET_INVALID_GUEST;
+		goto e_fail;
+	}
+
+	sev = &to_kvm_svm(kvm)->sev_info;
+
+	mutex_lock(&sev->guest_req_lock);
+
+	rc = snp_setup_guest_buf(svm, &data, req_gpa, resp_gpa);
+	if (rc)
+		goto unlock;
+
+	rc = sev_issue_cmd(kvm, SEV_CMD_SNP_GUEST_REQUEST, &data, &err);
+	if (rc)
+		/* use the firmware error code */
+		rc = err;
There are cases where sev_issue_cmd() can fail without setting err, for example when the file descriptor passed in is invalid. In that case this code path leaks uninitialized state from the host stack (err) all the way into the guest.
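To illustrate the class of bug outside of KVM (this is only a stand-alone sketch with made-up names, not the actual code), the hazard is an out-parameter that the callee may never write before the caller forwards it:

/*
 * Stand-alone illustration, not KVM code: the callee can fail before it
 * ever writes the out-parameter, and the caller then forwards whatever
 * happened to be on its stack. All names here are made up.
 */
#include <stdio.h>

static int issue_cmd(int fd, int *fw_err)
{
	if (fd < 0)
		return -1;	/* early failure: *fw_err is never written */

	*fw_err = 0;		/* firmware was reached and reported success */
	return 0;
}

static unsigned long handle_request(int fd)
{
	unsigned long rc;
	int err;		/* uninitialized, like 'err' above */

	rc = issue_cmd(fd, &err);
	if (rc)
		rc = err;	/* forwards uninitialized stack contents */

	return rc;		/* this is what would reach the guest */
}

int main(void)
{
	/* with a bad "fd" the returned value is whatever was on the stack */
	printf("rc = %lu\n", handle_request(-1));
	return 0;
}

Initializing err to a well-defined value, or only trusting it when the firmware call actually happened, would at least make such a failure deterministic instead of exposing host stack contents.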
Alex