Add infrastructure that handles the protected memory extension.

Arch-specific code has to provide the hypercalls and define a non-zero
VM_KVM_PROTECTED.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 include/linux/kvm_host.h |   4 ++
 mm/mprotect.c            |   1 +
 virt/kvm/kvm_main.c      | 131 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 136 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd0bb600f610..d7072f6d6aa0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -700,6 +700,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                    struct kvm_memory_slot *slot);
 
+int kvm_protect_all_memory(struct kvm *kvm);
+int kvm_protect_memory(struct kvm *kvm,
+                       unsigned long gfn, unsigned long npages, bool protect);
+
 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                             struct page **pages, int nr_pages);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 494192ca954b..552be3b4c80a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -505,6 +505,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
         vm_unacct_memory(charged);
         return error;
 }
+EXPORT_SYMBOL_GPL(mprotect_fixup);
 
 /*
  * pkey==-1 when doing a legacy mprotect()
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 530af95efdf3..07d45da5d2aa 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+static int protect_memory(unsigned long start, unsigned long end, bool protect);
+
 __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                 unsigned long start, unsigned long end, bool blockable)
 {
@@ -1309,6 +1311,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
         if (r)
                 goto out_bitmap;
 
+        if (mem->memory_size && kvm->mem_protected) {
+                r = protect_memory(new.userspace_addr,
+                                   new.userspace_addr + new.npages * PAGE_SIZE,
+                                   true);
+                if (r)
+                        goto out_bitmap;
+        }
+
         if (old.dirty_bitmap && !new.dirty_bitmap)
                 kvm_destroy_dirty_bitmap(&old);
         return 0;
@@ -2652,6 +2662,127 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+static int protect_memory(unsigned long start, unsigned long end, bool protect)
+{
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma, *prev;
+        int ret;
+
+        if (down_write_killable(&mm->mmap_sem))
+                return -EINTR;
+
+        ret = -ENOMEM;
+        vma = find_vma(current->mm, start);
+        if (!vma)
+                goto out;
+
+        ret = -EINVAL;
+        if (vma->vm_start > start)
+                goto out;
+
+        if (start > vma->vm_start)
+                prev = vma;
+        else
+                prev = vma->vm_prev;
+
+        ret = 0;
+        while (true) {
+                unsigned long newflags, tmp;
+
+                tmp = vma->vm_end;
+                if (tmp > end)
+                        tmp = end;
+
+                newflags = vma->vm_flags;
+                if (protect)
+                        newflags |= VM_KVM_PROTECTED;
+                else
+                        newflags &= ~VM_KVM_PROTECTED;
+
+                /* The VMA has been handled as part of another memslot */
+                if (newflags == vma->vm_flags)
+                        goto next;
+
+                ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
+                if (ret)
+                        goto out;
+
+next:
+                start = tmp;
+                if (start < prev->vm_end)
+                        start = prev->vm_end;
+
+                if (start >= end)
+                        goto out;
+
+                vma = prev->vm_next;
+                if (!vma || vma->vm_start != start) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+        }
+out:
+        up_write(&mm->mmap_sem);
+        return ret;
+}
+
+int kvm_protect_memory(struct kvm *kvm,
+                       unsigned long gfn, unsigned long npages, bool protect)
+{
+        struct kvm_memory_slot *memslot;
+        unsigned long start, end;
+        gfn_t numpages;
+
+        if (!VM_KVM_PROTECTED)
+                return -KVM_ENOSYS;
+
+        if (!npages)
+                return 0;
+
+        memslot = gfn_to_memslot(kvm, gfn);
+        /* Not backed by memory. It's okay. */
+        if (!memslot)
+                return 0;
+
+        start = gfn_to_hva_many(memslot, gfn, &numpages);
+        end = start + npages * PAGE_SIZE;
+
+        /* XXX: Share range across memory slots? */
+        if (WARN_ON(numpages < npages))
+                return -EINVAL;
+
+        return protect_memory(start, end, protect);
+}
+EXPORT_SYMBOL_GPL(kvm_protect_memory);
+
+int kvm_protect_all_memory(struct kvm *kvm)
+{
+        struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
+        unsigned long start, end;
+        int i, ret = 0;
+
+        if (!VM_KVM_PROTECTED)
+                return -KVM_ENOSYS;
+
+        mutex_lock(&kvm->slots_lock);
+        kvm->mem_protected = true;
+        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+                slots = __kvm_memslots(kvm, i);
+                kvm_for_each_memslot(memslot, slots) {
+                        start = memslot->userspace_addr;
+                        end = start + memslot->npages * PAGE_SIZE;
+                        ret = protect_memory(start, end, true);
+                        if (ret)
+                                goto out;
+                }
+        }
+out:
+        mutex_unlock(&kvm->slots_lock);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_protect_all_memory);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
 {
         if (!vcpu->sigset_active)
-- 
2.26.2
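
[Editor's note, not part of the patch] To illustrate the arch-side contract the
commit message mentions, below is a minimal sketch of how an architecture might
define VM_KVM_PROTECTED and route guest hypercalls to the new helpers. The flag
value (VM_HIGH_ARCH_4) and the KVM_HC_ENABLE_MEM_PROTECTED / KVM_HC_MEM_SHARE /
KVM_HC_MEM_UNSHARE hypercall numbers are illustrative assumptions; only
kvm_protect_memory() and kvm_protect_all_memory() come from this patch.

/*
 * Sketch only: hypothetical arch-side wiring. The VM_KVM_PROTECTED value
 * and the KVM_HC_* numbers are assumptions for illustration, not part of
 * this patch.
 */
#define VM_KVM_PROTECTED        VM_HIGH_ARCH_4

static int handle_mem_protected_hypercall(struct kvm_vcpu *vcpu,
                                           unsigned long nr,
                                           unsigned long gfn,
                                           unsigned long npages)
{
        switch (nr) {
        case KVM_HC_ENABLE_MEM_PROTECTED:
                /* Convert all current memslots and mark the VM protected. */
                return kvm_protect_all_memory(vcpu->kvm);
        case KVM_HC_MEM_SHARE:
                /* Guest shares a GFN range back with the host. */
                return kvm_protect_memory(vcpu->kvm, gfn, npages, false);
        case KVM_HC_MEM_UNSHARE:
                /* Guest makes a previously shared range protected again. */
                return kvm_protect_memory(vcpu->kvm, gfn, npages, true);
        default:
                return -KVM_ENOSYS;
        }
}

Such a handler would be called from the architecture's existing hypercall
dispatch path; how the feature is advertised to the guest is likewise up to the
arch code and outside the scope of this patch.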