From: Vishal Annapurve <vannapurve@xxxxxxxxxx>

Introduce an HVA range operator so that other KVM subsystems can
operate on HVA ranges.

Signed-off-by: Vishal Annapurve <vannapurve@xxxxxxxxxx>
[mdr: minor checkpatch alignment fixups]
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 include/linux/kvm_host.h |  6 ++++++
 virt/kvm/kvm_main.c      | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a2a8331fbb94..bc3a468e97e3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1429,6 +1429,12 @@ void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
 void kvm_mmu_invalidate_end(struct kvm *kvm);
 bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 
+typedef int (*kvm_hva_range_op_t)(struct kvm *kvm,
+				struct kvm_gfn_range *range, void *data);
+
+int kvm_vm_do_hva_range_op(struct kvm *kvm, unsigned long hva_start,
+			unsigned long hva_end, kvm_hva_range_op_t handler, void *data);
+
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg);
 long kvm_arch_vcpu_ioctl(struct file *filp,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4fd0fb0044f5..03243a7ece08 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -667,6 +667,55 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	return r;
 }
 
+int kvm_vm_do_hva_range_op(struct kvm *kvm, unsigned long hva_start,
+			unsigned long hva_end, kvm_hva_range_op_t handler, void *data)
+{
+	int ret = 0;
+	struct kvm_gfn_range gfn_range;
+	struct kvm_memory_slot *slot;
+	struct kvm_memslots *slots;
+	int i, idx;
+
+	if (WARN_ON_ONCE(hva_end <= hva_start))
+		return -EINVAL;
+
+	idx = srcu_read_lock(&kvm->srcu);
+
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
+		struct interval_tree_node *node;
+
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot_in_hva_range(node, slots,
+						  hva_start, hva_end - 1) {
+			unsigned long start, end;
+
+			slot = container_of(node, struct kvm_memory_slot,
+					    hva_node[slots->node_idx]);
+			start = max(hva_start, slot->userspace_addr);
+			end = min(hva_end, slot->userspace_addr +
+				  (slot->npages << PAGE_SHIFT));
+
+			/*
+			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
+			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
+			 */
+			gfn_range.start = hva_to_gfn_memslot(start, slot);
+			gfn_range.end = hva_to_gfn_memslot(end + PAGE_SIZE - 1, slot);
+			gfn_range.slot = slot;
+
+			ret = handler(kvm, &gfn_range, data);
+			if (ret)
+				goto e_ret;
+		}
+	}
+
+e_ret:
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_vm_do_hva_range_op);
+
 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
-- 
2.25.1
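
For reference, a minimal sketch (not part of the patch) of how a KVM
subsystem might call the new helper. The handler and wrapper names
(example_gfn_range_handler(), example_count_gfns()) are hypothetical;
only kvm_vm_do_hva_range_op() and kvm_hva_range_op_t are introduced
here:

/*
 * Hypothetical handler: invoked once per memslot chunk that intersects
 * the requested HVA range, with *range describing the corresponding
 * GFNs in that slot. Returning non-zero stops the walk.
 */
static int example_gfn_range_handler(struct kvm *kvm,
				     struct kvm_gfn_range *range, void *data)
{
	unsigned long *count = data;

	/* Operate on [range->start, range->end) within range->slot. */
	*count += range->end - range->start;

	return 0;
}

/* Count how many guest pages back the HVA range [hva, hva + len). */
static long example_count_gfns(struct kvm *kvm, unsigned long hva,
			       unsigned long len)
{
	unsigned long count = 0;
	int ret;

	ret = kvm_vm_do_hva_range_op(kvm, hva, hva + len,
				     example_gfn_range_handler, &count);

	return ret ? ret : count;
}

Per the implementation above, the handler runs with kvm->srcu held for
reading and is called once per intersecting memslot range across all
address spaces; a non-zero return aborts the walk and is propagated
back to the caller of kvm_vm_do_hva_range_op().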