[RFC PATCH 18/28] kvm: mmu: Add an hva range iterator for memslot GFNs

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Factors out of kvm_handle_hva_range a utility for iterating over host
virtual address ranges and obtaining the GFN ranges they map to. The
rmap-reliant HVA iteration used for shadow paging becomes a wrapper
around this HVA-range-to-GFN-range iterator. Since the direct MMU maps
each GFN to only one physical address and does not use the rmap, it can
consume the GFN ranges directly.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 96 +++++++++++++++++++++++++++++++---------------
 1 file changed, 66 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3d4a78f2461a9..32426536723c6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2701,27 +2701,14 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
 	rmap_walk_init_level(iterator, iterator->level);
 }
 
-#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
-	   _start_gfn, _end_gfn, _iter_)				\
-	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
-				 _end_level_, _start_gfn, _end_gfn);	\
-	     slot_rmap_walk_okay(_iter_);				\
-	     slot_rmap_walk_next(_iter_))
-
-static int kvm_handle_hva_range(struct kvm *kvm,
-				unsigned long start,
-				unsigned long end,
-				unsigned long data,
-				int (*handler)(struct kvm *kvm,
-					       struct kvm_rmap_head *rmap_head,
-					       struct kvm_memory_slot *slot,
-					       gfn_t gfn,
-					       int level,
-					       unsigned long data))
+static int kvm_handle_direct_hva_range(struct kvm *kvm, unsigned long start,
+		unsigned long end, unsigned long data,
+		int (*handler)(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			       gfn_t gfn_start, gfn_t gfn_end,
+			       unsigned long data))
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	struct slot_rmap_walk_iterator iterator;
 	int ret = 0;
 	int i;
 
@@ -2736,25 +2723,74 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 				      (memslot->npages << PAGE_SHIFT));
 			if (hva_start >= hva_end)
 				continue;
-			/*
-			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-			 */
 			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
-			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-			for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-						 PT_MAX_HUGEPAGE_LEVEL,
-						 gfn_start, gfn_end - 1,
-						 &iterator)
-				ret |= handler(kvm, iterator.rmap, memslot,
-					       iterator.gfn, iterator.level, data);
+			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1,
+						     memslot);
+
+			ret |= handler(kvm, memslot, gfn_start, gfn_end, data);
 		}
 	}
 
 	return ret;
 }
 
+#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
+	   _start_gfn, _end_gfn, _iter_)				\
+	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
+				 _end_level_, _start_gfn, _end_gfn);	\
+	     slot_rmap_walk_okay(_iter_);				\
+	     slot_rmap_walk_next(_iter_))
+
+
+struct handle_hva_range_shadow_data {
+	unsigned long data;
+	int (*handler)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+		       struct kvm_memory_slot *slot, gfn_t gfn, int level,
+		       unsigned long data);
+};
+
+static int handle_hva_range_shadow_handler(struct kvm *kvm,
+					   struct kvm_memory_slot *memslot,
+					   gfn_t gfn_start, gfn_t gfn_end,
+					   unsigned long data)
+{
+	int ret = 0;
+	struct slot_rmap_walk_iterator iterator;
+	struct handle_hva_range_shadow_data *shadow_data =
+		(struct handle_hva_range_shadow_data *)data;
+
+	for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
+				 PT_MAX_HUGEPAGE_LEVEL,
+				 gfn_start, gfn_end - 1, &iterator) {
+		BUG_ON(!iterator.rmap);
+		ret |= shadow_data->handler(kvm, iterator.rmap, memslot,
+			       iterator.gfn, iterator.level, shadow_data->data);
+	}
+
+	return ret;
+}
+
+static int kvm_handle_hva_range(struct kvm *kvm,
+				unsigned long start,
+				unsigned long end,
+				unsigned long data,
+				int (*handler)(struct kvm *kvm,
+					       struct kvm_rmap_head *rmap_head,
+					       struct kvm_memory_slot *slot,
+					       gfn_t gfn,
+					       int level,
+					       unsigned long data))
+{
+	struct handle_hva_range_shadow_data shadow_data;
+
+	shadow_data.data = data;
+	shadow_data.handler = handler;
+
+	return kvm_handle_direct_hva_range(kvm, start, end,
+					   (unsigned long)&shadow_data,
+					   handle_hva_range_shadow_handler);
+}
+
 /*
  * Marks the range of gfns, [start, end), non-present.
  */
-- 
2.23.0.444.g18eeb5a265-goog




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux