On Wed, Sep 28, 2022, Carlos Bilbao wrote:
> @@ -3510,7 +3510,7 @@ bool sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
>
>  	ret = __sev_snp_update_protected_guest_state(vcpu);
>  	if (ret)
> -		vcpu_unimpl(vcpu, "snp: AP state update on init failed\n");
> +		vcpu_unimpl(vcpu, "SNP: AP state update on init failed\n");
>
>  unlock:
>  	mutex_unlock(&svm->snp_vmsa_mutex);
> @@ -4170,7 +4170,7 @@ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
>  	/* PKRU is restored on VMEXIT, save the current host value */
>  	hostsa->pkru = read_pkru();
>
> -	/* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */
> +	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
>  	hostsa->xss = host_xss;
>  }
>
> @@ -4223,7 +4223,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
>  	 * Allocate an SNP safe page to workaround the SNP erratum where
>  	 * the CPU will incorrectly signal an RMP violation #PF if a
>  	 * hugepage (2mb or 1gb) collides with the RMP entry of VMCB, VMSA
> -	 * or AVIC backing page. The recommeded workaround is to not use the
> +	 * or AVIC backing page. The recommended workaround is to not use the
>  	 * hugepage.
>  	 *
>  	 * Allocate one extra page, use a page which is not 2mb aligned

SNP support doesn't exist upstream, looks like this was generated against an SNP branch.
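
For reference, the workaround the quoted comment describes ("allocate one extra page, use a page which is not 2mb aligned") boils down to something like the sketch below. This is an illustrative userspace toy, not the KVM helper: alloc_non_2mb_aligned_page() is a made-up name, and the real erratum is about the physical frame backing the VMCB/VMSA/AVIC page, whereas this checks the virtual address purely to show the pick-the-unaligned-candidate idea.

/* Sketch: allocate one spare page, return whichever page is NOT 2MB aligned. */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGEPAGE_SIZE (2UL * 1024 * 1024)

static void *alloc_non_2mb_aligned_page(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uint8_t *p;

	/* Grab one extra page so there are two candidate pages to choose from. */
	p = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;

	/*
	 * Of two consecutive pages, at most one can start on a 2MB boundary.
	 * Hand back the one that doesn't and release the spare.
	 */
	if (((uintptr_t)p % HUGEPAGE_SIZE) == 0) {
		munmap(p, page_size);
		return p + page_size;
	}

	munmap(p + page_size, page_size);
	return p;
}

int main(void)
{
	void *page = alloc_non_2mb_aligned_page();

	if (!page)
		return 1;

	printf("page at %p, offset into 2MB region: 0x%lx\n",
	       page, (unsigned long)((uintptr_t)page % HUGEPAGE_SIZE));
	return 0;
}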