The KVM_SEV_SNP_LAUNCH_UPDATE command can be used to insert data into the
guest's memory. The data is encrypted with the cryptographic context created
with the KVM_SEV_SNP_LAUNCH_START command.

In addition to inserting data, it can insert two special pages into the
guest's memory: the secrets page and the CPUID page. For more information
see the SEV-SNP specification.

Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
 arch/x86/kvm/svm/sev.c   | 139 +++++++++++++++++++++++++++++++++++++++
 include/linux/sev.h      |   2 +
 include/uapi/linux/kvm.h |  18 +++++
 3 files changed, 159 insertions(+)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index dac71bdedac4..dc9343ecca14 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -17,6 +17,7 @@
 #include <linux/misc_cgroup.h>
 #include <linux/processor.h>
 #include <linux/trace_events.h>
+#include <linux/sev.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/trapnr.h>
@@ -1605,6 +1606,141 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return rc;
 }
 
+static struct kvm_memory_slot *hva_to_memslot(struct kvm *kvm, unsigned long hva)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot;
+
+	kvm_for_each_memslot(memslot, slots) {
+		if (hva >= memslot->userspace_addr &&
+		    hva < memslot->userspace_addr + (memslot->npages << PAGE_SHIFT))
+			return memslot;
+	}
+
+	return NULL;
+}
+
+static bool hva_to_gpa(struct kvm *kvm, unsigned long hva, gpa_t *gpa)
+{
+	struct kvm_memory_slot *memslot;
+	gpa_t gpa_offset;
+
+	memslot = hva_to_memslot(kvm, hva);
+	if (!memslot)
+		return false;
+
+	gpa_offset = hva - memslot->userspace_addr;
+	*gpa = ((memslot->base_gfn << PAGE_SHIFT) + gpa_offset);
+
+	return true;
+}
+
+static int snp_page_reclaim(struct page *page, int rmppage_size)
+{
+	struct sev_data_snp_page_reclaim data = {};
+	struct rmpupdate e = {};
+	int rc, err;
+
+	data.paddr = __sme_page_pa(page) | rmppage_size;
+	rc = snp_guest_page_reclaim(&data, &err);
+	if (rc)
+		return rc;
+
+	return rmpupdate(page, &e);
+}
+
+static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	unsigned long npages, vaddr, vaddr_end, i, next_vaddr;
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct sev_data_snp_launch_update data = {};
+	struct kvm_sev_snp_launch_update params;
+	int *error = &argp->error;
+	struct kvm_vcpu *vcpu;
+	struct page **inpages;
+	struct rmpupdate e;
+	int ret;
+
+	if (!sev_snp_guest(kvm))
+		return -ENOTTY;
+
+	if (!sev->snp_context)
+		return -EINVAL;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+		return -EFAULT;
+
+	data.gctx_paddr = __psp_pa(sev->snp_context);
+
+	/* Lock the user memory. */
+	inpages = sev_pin_memory(kvm, params.uaddr, params.len, &npages, 1);
+	if (!inpages)
+		return -ENOMEM;
+
+	vcpu = kvm_get_vcpu(kvm, 0);
+	vaddr = params.uaddr;
+	vaddr_end = vaddr + params.len;
+
+	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i++) {
+		unsigned long psize, pmask;
+		int level = PG_LEVEL_4K;
+		gpa_t gpa;
+
+		if (!hva_to_gpa(kvm, vaddr, &gpa)) {
+			ret = -EINVAL;
+			goto e_unpin;
+		}
+
+		psize = page_level_size(level);
+		pmask = page_level_mask(level);
+		gpa = gpa & pmask;
+
+		/* Transition the page state to pre-guest */
+		memset(&e, 0, sizeof(e));
+		e.assigned = 1;
+		e.gpa = gpa;
+		e.asid = sev_get_asid(kvm);
+		e.immutable = true;
+		e.pagesize = X86_TO_RMP_PG_LEVEL(level);
+		ret = rmpupdate(inpages[i], &e);
+		if (ret) {
+			ret = -EFAULT;
+			goto e_unpin;
+		}
+
+		data.address = __sme_page_pa(inpages[i]);
+		data.page_size = e.pagesize;
+		data.page_type = params.page_type;
+		ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE, &data, error);
+		if (ret) {
+			snp_page_reclaim(inpages[i], e.pagesize);
+			goto e_unpin;
+		}
+
+		next_vaddr = (vaddr & pmask) + psize;
+	}
+
+e_unpin:
+	/* Content of memory is updated, mark pages dirty */
+	memset(&e, 0, sizeof(e));
+	for (i = 0; i < npages; i++) {
+		set_page_dirty_lock(inpages[i]);
+		mark_page_accessed(inpages[i]);
+
+		/*
+		 * If it's an error, then update the RMP entry to change page
+		 * ownership to the hypervisor.
+		 */
+		if (ret)
+			rmpupdate(inpages[i], &e);
+	}
+
+	/* Unlock the user pages */
+	sev_unpin_memory(kvm, inpages, npages);
+
+	return ret;
+}
+
 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -1697,6 +1833,9 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 	case KVM_SEV_SNP_LAUNCH_START:
 		r = snp_launch_start(kvm, &sev_cmd);
 		break;
+	case KVM_SEV_SNP_LAUNCH_UPDATE:
+		r = snp_launch_update(kvm, &sev_cmd);
+		break;
 	default:
 		r = -EINVAL;
 		goto out;
diff --git a/include/linux/sev.h b/include/linux/sev.h
index bcd4d75d87c8..82e804a2ee0d 100644
--- a/include/linux/sev.h
+++ b/include/linux/sev.h
@@ -36,8 +36,10 @@ struct __packed rmpentry {
 
 /* RMP page size */
 #define RMP_PG_SIZE_4K	0
+#define RMP_PG_SIZE_2M	1
 
 #define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
 
 struct rmpupdate {
 	u64 gpa;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 56ab5576741e..8890d5a340be 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1681,6 +1681,7 @@ enum sev_cmd_id {
 	/* SNP specific commands */
 	KVM_SEV_SNP_INIT = 255,
 	KVM_SEV_SNP_LAUNCH_START,
+	KVM_SEV_SNP_LAUNCH_UPDATE,
 
 	KVM_SEV_NR_MAX,
 };
@@ -1786,6 +1787,23 @@ struct kvm_sev_snp_launch_start {
 	__u8 gosvw[16];
 };
 
+#define KVM_SEV_SNP_PAGE_TYPE_NORMAL		0x1
+#define KVM_SEV_SNP_PAGE_TYPE_VMSA		0x2
+#define KVM_SEV_SNP_PAGE_TYPE_ZERO		0x3
+#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED	0x4
+#define KVM_SEV_SNP_PAGE_TYPE_SECRETS		0x5
+#define KVM_SEV_SNP_PAGE_TYPE_CPUID		0x6
+
+struct kvm_sev_snp_launch_update {
+	__u64 uaddr;
+	__u32 len;
+	__u8 imi_page;
+	__u8 page_type;
+	__u8 vmpl3_perms;
+	__u8 vmpl2_perms;
+	__u8 vmpl1_perms;
+};
+
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
 #define KVM_DEV_ASSIGN_MASK_INTX	(1 << 2)
-- 
2.17.1
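For reviewers, here is a minimal sketch of how a VMM might drive the new
command from userspace through the existing KVM_MEMORY_ENCRYPT_OP ioctl.
It is not part of the patch: the helper name snp_launch_update_data() and
the vm_fd/sev_fd/guest_mem/guest_len parameters are placeholders for state
the VMM already holds, and error handling is elided.

	/*
	 * Illustrative only: invoke KVM_SEV_SNP_LAUNCH_UPDATE on a range of
	 * guest memory that is already mapped into a memslot of vm_fd.
	 */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int snp_launch_update_data(int vm_fd, int sev_fd,
					  void *guest_mem, uint32_t guest_len,
					  uint8_t page_type)
	{
		struct kvm_sev_snp_launch_update update;
		struct kvm_sev_cmd cmd;

		memset(&update, 0, sizeof(update));
		update.uaddr = (uint64_t)(unsigned long)guest_mem; /* HVA of the data */
		update.len = guest_len;                            /* length in bytes */
		update.page_type = page_type;   /* e.g. KVM_SEV_SNP_PAGE_TYPE_NORMAL */

		memset(&cmd, 0, sizeof(cmd));
		cmd.id = KVM_SEV_SNP_LAUNCH_UPDATE;
		cmd.data = (uint64_t)(unsigned long)&update;
		cmd.sev_fd = sev_fd;            /* open file descriptor of /dev/sev */

		return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	}

A VMM would presumably call this once per region after KVM_SEV_SNP_LAUNCH_START,
using KVM_SEV_SNP_PAGE_TYPE_SECRETS or KVM_SEV_SNP_PAGE_TYPE_CPUID for the two
special pages described above.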