Expose KVM_MEM_PRIVATE and the memslot fields private_fd/private_offset
to userspace. KVM registers/unregisters the private memslot with the
fd-based memory backing store and responds to invalidation events from
the inaccessible_notifier by zapping the existing memory mappings in
the secondary page table. Whether KVM_MEM_PRIVATE is actually exposed
to userspace is determined by architecture code, which can turn it on
by overriding the default kvm_arch_has_private_mem().

A 'kvm' reference is added to the memslot structure since the
inaccessible_notifier callback only receives a memslot reference, but
'kvm' is needed to do the zapping.

Co-developed-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Signed-off-by: Yu Zhang <yu.c.zhang@xxxxxxxxxxxxxxx>
Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
---
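For reference, a VMM would wire the new uAPI up roughly as sketched
below. This is an illustrative sketch only, not part of the patch; it
assumes the MFD_INACCESSIBLE memfd and the kvm_userspace_memory_region_ext
layout (base region plus private_offset/private_fd) introduced by earlier
patches in this series, so exact names may differ:

  #define _GNU_SOURCE
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <unistd.h>
  #include <linux/kvm.h>

  /*
   * Illustrative sketch only: create an inaccessible memfd and register
   * it as the private backing of a memslot.  Assumes uapi definitions
   * (MFD_INACCESSIBLE, KVM_MEM_PRIVATE, kvm_userspace_memory_region_ext)
   * added earlier in this series.
   */
  static int register_private_slot(int vm_fd, __u64 gpa, __u64 size,
                                   void *shared_hva)
  {
          int priv_fd = memfd_create("guest-private", MFD_INACCESSIBLE);

          if (priv_fd < 0 || ftruncate(priv_fd, size))
                  return -1;

          struct kvm_userspace_memory_region_ext ext = {
                  .region = {
                          .slot            = 0,
                          .flags           = KVM_MEM_PRIVATE,
                          .guest_phys_addr = gpa,
                          .memory_size     = size,
                          /* Shared (accessible) backing for the same range. */
                          .userspace_addr  = (__u64)(unsigned long)shared_hva,
                  },
                  .private_fd     = priv_fd,
                  .private_offset = 0,
          };

          /* KVM copies the _ext layout only when KVM_MEM_PRIVATE is set. */
          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &ext);
  }

Conversion of ranges between shared and private is handled separately
(see the mem_attr ioctl in the context below) and is not shown here.
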
 include/linux/kvm_host.h |   1 +
 virt/kvm/kvm_main.c      | 116 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 111 insertions(+), 6 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b9906cdf468b..cb4eefac709c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -589,6 +589,7 @@ struct kvm_memory_slot {
         struct file *private_file;
         loff_t private_offset;
         struct inaccessible_notifier notifier;
+        struct kvm *kvm;
 };
 
 static inline bool kvm_slot_can_be_private(const struct kvm_memory_slot *slot)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 97d893f7482c..87e239d35b96 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -983,6 +983,57 @@ static int kvm_vm_ioctl_set_mem_attr(struct kvm *kvm, gpa_t gpa, gpa_t size,
         xa_erase(&kvm->mem_attr_array, index);
         return r;
 }
+
+static void kvm_private_notifier_invalidate(struct inaccessible_notifier *notifier,
+                                            pgoff_t start, pgoff_t end)
+{
+        struct kvm_memory_slot *slot = container_of(notifier,
+                                                    struct kvm_memory_slot,
+                                                    notifier);
+        unsigned long base_pgoff = slot->private_offset >> PAGE_SHIFT;
+        gfn_t start_gfn = slot->base_gfn;
+        gfn_t end_gfn = slot->base_gfn + slot->npages;
+
+
+        if (start > base_pgoff)
+                start_gfn = slot->base_gfn + start - base_pgoff;
+
+        if (end < base_pgoff + slot->npages)
+                end_gfn = slot->base_gfn + end - base_pgoff;
+
+        if (start_gfn >= end_gfn)
+                return;
+
+        kvm_zap_gfn_range(slot->kvm, start_gfn, end_gfn);
+}
+
+static struct inaccessible_notifier_ops kvm_private_notifier_ops = {
+        .invalidate = kvm_private_notifier_invalidate,
+};
+
+static inline void kvm_private_mem_register(struct kvm_memory_slot *slot)
+{
+        slot->notifier.ops = &kvm_private_notifier_ops;
+        inaccessible_register_notifier(slot->private_file, &slot->notifier);
+}
+
+static inline void kvm_private_mem_unregister(struct kvm_memory_slot *slot)
+{
+        inaccessible_unregister_notifier(slot->private_file, &slot->notifier);
+}
+
+#else /* !CONFIG_HAVE_KVM_PRIVATE_MEM */
+
+static inline void kvm_private_mem_register(struct kvm_memory_slot *slot)
+{
+        WARN_ON_ONCE(1);
+}
+
+static inline void kvm_private_mem_unregister(struct kvm_memory_slot *slot)
+{
+        WARN_ON_ONCE(1);
+}
+
 #endif /* CONFIG_HAVE_KVM_PRIVATE_MEM */
 
 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
@@ -1029,6 +1080,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 /* This does not remove the slot from struct kvm_memslots data structures */
 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
+        if (slot->flags & KVM_MEM_PRIVATE) {
+                kvm_private_mem_unregister(slot);
+                fput(slot->private_file);
+        }
+
         kvm_destroy_dirty_bitmap(slot);
 
         kvm_arch_free_memslot(kvm, slot);
@@ -1600,10 +1656,16 @@ bool __weak kvm_arch_has_private_mem(struct kvm *kvm)
         return false;
 }
 
-static int check_memory_region_flags(const struct kvm_user_mem_region *mem)
+static int check_memory_region_flags(struct kvm *kvm,
+                                     const struct kvm_user_mem_region *mem)
 {
         u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
 
+#ifdef CONFIG_HAVE_KVM_PRIVATE_MEM
+        if (kvm_arch_has_private_mem(kvm))
+                valid_flags |= KVM_MEM_PRIVATE;
+#endif
+
 #ifdef __KVM_HAVE_READONLY_MEM
         valid_flags |= KVM_MEM_READONLY;
 #endif
@@ -1679,6 +1741,9 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
 {
         int r;
 
+        if (change == KVM_MR_CREATE && new->flags & KVM_MEM_PRIVATE)
+                kvm_private_mem_register(new);
+
         /*
          * If dirty logging is disabled, nullify the bitmap; the old bitmap
          * will be freed on "commit".  If logging is enabled in both old and
@@ -1707,6 +1772,9 @@ static int kvm_prepare_memory_region(struct kvm *kvm,
         if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
                 kvm_destroy_dirty_bitmap(new);
 
+        if (r && change == KVM_MR_CREATE && new->flags & KVM_MEM_PRIVATE)
+                kvm_private_mem_unregister(new);
+
         return r;
 }
 
@@ -2004,7 +2072,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
         int as_id, id;
         int r;
 
-        r = check_memory_region_flags(mem);
+        r = check_memory_region_flags(kvm, mem);
         if (r)
                 return r;
 
@@ -2023,6 +2091,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
              !access_ok((void __user *)(unsigned long)mem->userspace_addr,
                         mem->memory_size))
                 return -EINVAL;
+        if (mem->flags & KVM_MEM_PRIVATE &&
+            (mem->private_offset & (PAGE_SIZE - 1) ||
+             mem->private_offset > U64_MAX - mem->memory_size))
+                return -EINVAL;
         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
                 return -EINVAL;
         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
@@ -2061,6 +2133,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
                 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
                         return -EINVAL;
         } else { /* Modify an existing slot. */
+                /* Private memslots are immutable, they can only be deleted. */
+                if (mem->flags & KVM_MEM_PRIVATE)
+                        return -EINVAL;
                 if ((mem->userspace_addr != old->userspace_addr) ||
                     (npages != old->npages) ||
                     ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
@@ -2089,10 +2164,27 @@ int __kvm_set_memory_region(struct kvm *kvm,
         new->npages = npages;
         new->flags = mem->flags;
         new->userspace_addr = mem->userspace_addr;
+        if (mem->flags & KVM_MEM_PRIVATE) {
+                new->private_file = fget(mem->private_fd);
+                if (!new->private_file) {
+                        r = -EINVAL;
+                        goto out;
+                }
+                new->private_offset = mem->private_offset;
+        }
+
+        new->kvm = kvm;
 
         r = kvm_set_memslot(kvm, old, new, change);
         if (r)
-                kfree(new);
+                goto out;
+
+        return 0;
+
+out:
+        if (new->private_file)
+                fput(new->private_file);
+        kfree(new);
         return r;
 }
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
@@ -4747,16 +4839,28 @@ static long kvm_vm_ioctl(struct file *filp,
         }
         case KVM_SET_USER_MEMORY_REGION: {
                 struct kvm_user_mem_region mem;
-                unsigned long size = sizeof(struct kvm_userspace_memory_region);
+                unsigned int flags_offset = offsetof(typeof(mem), flags);
+                unsigned long size;
+                u32 flags;
 
                 kvm_sanity_check_user_mem_region_alias();
 
+                memset(&mem, 0, sizeof(mem));
+
                 r = -EFAULT;
-                if (copy_from_user(&mem, argp, size))
+                if (get_user(flags, (u32 __user *)(argp + flags_offset)))
+                        goto out;
+
+                if (flags & KVM_MEM_PRIVATE)
+                        size = sizeof(struct kvm_userspace_memory_region_ext);
+                else
+                        size = sizeof(struct kvm_userspace_memory_region);
+
+                if (copy_from_user(&mem, argp, size))
                         goto out;
 
                 r = -EINVAL;
-                if (mem.flags & KVM_MEM_PRIVATE)
+                if ((flags ^ mem.flags) & KVM_MEM_PRIVATE)
                         goto out;
 
                 r = kvm_vm_ioctl_set_memory_region(kvm, &mem);
-- 
2.25.1