Remove the enc_region structure definition and the code that maintained
it; they are no longer needed in view of the xarray support added in
the previous patch. Leave svm_register_enc_region() and
svm_unregister_enc_region() as stubs, since the corresponding ioctls
are used by QEMU, which will crash if they do not return 0.

Co-developed-by: Brijesh Singh <brijesh.singh@xxxxxxx>
Signed-off-by: eric van tassell <Eric.VanTassell@xxxxxxx>
---
 arch/x86/kvm/svm/sev.c | 117 +----------------------------------------
 arch/x86/kvm/svm/svm.h |   1 -
 2 files changed, 1 insertion(+), 117 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 4a0157254fef..635e15f01edb 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -27,14 +27,6 @@ static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-struct enc_region {
-	struct list_head list;
-	unsigned long npages;
-	struct page **pages;
-	unsigned long uaddr;
-	unsigned long size;
-};
-
 static int sev_flush_asids(void)
 {
 	int ret, error = 0;
@@ -182,7 +174,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 	sev->active = true;
 	sev->asid = asid;
-	INIT_LIST_HEAD(&sev->regions_list);
 	xa_init(&sev->pages_xarray);
 
 	return 0;
@@ -1074,113 +1065,18 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
 int svm_register_enc_region(struct kvm *kvm,
 			    struct kvm_enc_region *range)
 {
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	struct enc_region *region;
-	int ret = 0;
-
-	if (!sev_guest(kvm))
-		return -ENOTTY;
-
-	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
-		return -EINVAL;
-
-	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
-	if (!region)
-		return -ENOMEM;
-
-	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
-	if (IS_ERR(region->pages)) {
-		ret = PTR_ERR(region->pages);
-		goto e_free;
-	}
-
-	/*
-	 * The guest may change the memory encryption attribute from C=0 -> C=1
-	 * or vice versa for this memory range. Lets make sure caches are
-	 * flushed to ensure that guest data gets written into memory with
-	 * correct C-bit.
-	 */
-	sev_clflush_pages(region->pages, region->npages);
-
-	region->uaddr = range->addr;
-	region->size = range->size;
-
-	mutex_lock(&kvm->lock);
-	list_add_tail(&region->list, &sev->regions_list);
-	mutex_unlock(&kvm->lock);
-
-	return ret;
-
-e_free:
-	kfree(region);
-	return ret;
-}
-
-static struct enc_region *
-find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
-{
-	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	struct list_head *head = &sev->regions_list;
-	struct enc_region *i;
-
-	list_for_each_entry(i, head, list) {
-		if (i->uaddr == range->addr &&
-		    i->size == range->size)
-			return i;
-	}
-
-	return NULL;
-}
-
-static void __unregister_enc_region_locked(struct kvm *kvm,
-					   struct enc_region *region)
-{
-	sev_unpin_memory(kvm, region->pages, region->npages);
-	list_del(&region->list);
-	kfree(region);
+	return 0;
 }
 
 int svm_unregister_enc_region(struct kvm *kvm,
 			      struct kvm_enc_region *range)
 {
-	struct enc_region *region;
-	int ret;
-
-	mutex_lock(&kvm->lock);
-
-	if (!sev_guest(kvm)) {
-		ret = -ENOTTY;
-		goto failed;
-	}
-
-	region = find_enc_region(kvm, range);
-	if (!region) {
-		ret = -EINVAL;
-		goto failed;
-	}
-
-	/*
-	 * Ensure that all guest tagged cache entries are flushed before
-	 * releasing the pages back to the system for use. CLFLUSH will
-	 * not do this, so issue a WBINVD.
-	 */
-	wbinvd_on_all_cpus();
-
-	__unregister_enc_region_locked(kvm, region);
-
-	mutex_unlock(&kvm->lock);
 	return 0;
-
-failed:
-	mutex_unlock(&kvm->lock);
-	return ret;
 }
 
 void sev_vm_destroy(struct kvm *kvm)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
-	struct list_head *head = &sev->regions_list;
-	struct list_head *pos, *q;
 	XA_STATE(xas, &sev->pages_xarray, 0);
 	struct page *xa_page;
 
@@ -1196,17 +1092,6 @@ void sev_vm_destroy(struct kvm *kvm)
 	 */
 	wbinvd_on_all_cpus();
 
-	/*
-	 * if userspace was terminated before unregistering the memory regions
-	 * then lets unpin all the registered memory.
-	 */
-	if (!list_empty(head)) {
-		list_for_each_safe(pos, q, head) {
-			__unregister_enc_region_locked(kvm,
-				list_entry(pos, struct enc_region, list));
-		}
-	}
-
 	/* Release each pinned page that SEV tracked in sev->pages_xarray. */
 	xas_for_each(&xas, xa_page, ULONG_MAX) {
 		put_page(xa_page);
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 278c46bc52aa..98d3d7b299cb 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -64,7 +64,6 @@ struct kvm_sev_info {
 	unsigned int handle;	/* SEV firmware handle */
 	int fd;			/* SEV device fd */
 	unsigned long pages_locked; /* Number of pages locked */
-	struct list_head regions_list; /* List of registered regions */
 	struct xarray pages_xarray; /* List of PFN locked */
 };
 
-- 
2.17.1
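
For reviewers unfamiliar with the userspace side, a minimal sketch of
how a QEMU-like VMM drives the KVM_MEMORY_ENCRYPT_REG_REGION ioctl that
svm_register_enc_region() backs; the struct and ioctl number come from
include/uapi/linux/kvm.h, while vm_fd, hva and len are placeholders.
This is illustrative only and not part of the patch, but it shows why
the stub must keep returning 0: callers treat a non-zero return as a
fatal error.

#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Register a range of host memory backing guest RAM as an encrypted
 * region with KVM.  After this patch the kernel side is a no-op, but a
 * non-zero return is still treated as fatal by existing userspace.
 */
static int register_enc_region(int vm_fd, void *hva, size_t len)
{
	struct kvm_enc_region range = {
		.addr = (__u64)(unsigned long)hva,
		.size = len,
	};

	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
}

The teardown path issues KVM_MEMORY_ENCRYPT_UNREG_REGION with the same
struct, which is why svm_unregister_enc_region() is stubbed in the same
way rather than removed.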