[RFC PATCH 21/28] kvm: mmu: Integrate the direct mmu with the changed pte notifier

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Implements arch-specific handler functions for the changed-PTE MMU
notifier. These handlers use the paging structure walk iterator and are
needed to allow the main MM to safely update page permissions on pages
backing guest memory.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 arch/x86/kvm/mmu.c | 53 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 51 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca9b3f574f401..b144c803c36d2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2386,7 +2386,6 @@ static bool direct_walk_iterator_next_present_pte(
 /*
  * As direct_walk_iterator_next_present_pte but skips over non-leaf ptes.
  */
-__attribute__((unused))
 static bool direct_walk_iterator_next_present_leaf_pte(
 		struct direct_walk_iterator *iter)
 {
@@ -2867,9 +2866,59 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 	return r;
 }
 
+/*
+ * Update the direct MMU's leaf SPTEs for gfns in [start, end) after the host
+ * PTE backing that range was changed to @pte (change_pte MMU notification).
+ * Returns the value of direct_walk_iterator_end_traversal(), which the
+ * callers treat as "TLB flush needed".
+ */
+static int set_direct_pte_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+			      gfn_t start, gfn_t end, unsigned long pte)
+{
+	struct direct_walk_iterator iter;
+	pte_t host_pte;
+	kvm_pfn_t new_pfn;
+	u64 new_pte;
+
+	/* Reconstruct the host pte_t from its raw value to extract the pfn. */
+	host_pte.pte = pte;
+	new_pfn = pte_pfn(host_pte);
+
+	direct_walk_iterator_setup_walk(&iter, kvm, slot->as_id, start, end,
+					MMU_WRITE_LOCK);
+	while (direct_walk_iterator_next_present_leaf_pte(&iter)) {
+		/* change_pte notifications only cover 4K host mappings. */
+		BUG_ON(iter.level != PT_PAGE_TABLE_LEVEL);
+
+		/*
+		 * If the new host PTE is writable, zap the SPTE (set it to 0)
+		 * so the next guest access faults and re-establishes a fresh
+		 * mapping. Otherwise, rewrite the SPTE in place: point it at
+		 * the new pfn, strip the writable bits (both the hardware
+		 * writable bit and the host-writable software bit), clear
+		 * dirty/accessed, and mark it for access tracking.
+		 */
+		if (pte_write(host_pte))
+			new_pte = 0;
+		else {
+			new_pte = iter.old_pte & ~PT64_BASE_ADDR_MASK;
+			new_pte |= new_pfn << PAGE_SHIFT;
+			new_pte &= ~PT_WRITABLE_MASK;
+			new_pte &= ~SPTE_HOST_WRITEABLE;
+			new_pte &= ~shadow_dirty_mask;
+			new_pte &= ~shadow_accessed_mask;
+			new_pte = mark_spte_for_access_track(new_pte);
+		}
+
+		/*
+		 * NOTE(review): a failed set_pte presumably means the atomic
+		 * update lost a race and the iterator will revisit the entry;
+		 * the explicit continue is a no-op at the end of the loop —
+		 * confirm against the iterator's contract.
+		 */
+		if (!direct_walk_iterator_set_pte(&iter, new_pte))
+			continue;
+	}
+	return direct_walk_iterator_end_traversal(&iter);
+}
+
+/*
+ * change_pte handler for the direct MMU: translate the single-page HVA range
+ * [address, address + 1) to gfn ranges across the overlapping memslots and
+ * apply set_direct_pte_gfn to each. Returns nonzero if a TLB flush is needed.
+ */
+static int set_direct_pte_hva(struct kvm *kvm, unsigned long address,
+			    pte_t host_pte)
+{
+	return kvm_handle_direct_hva_range(kvm, address, address + 1,
+					   host_pte.pte, set_direct_pte_gfn);
+}
+
+/*
+ * Arch hook for the change_pte MMU notifier: propagate the new host PTE for
+ * @hva into the direct MMU (when enabled) and into the shadow MMU rmaps
+ * (unless running in pure direct MMU mode). Returns nonzero if the caller
+ * must flush TLBs.
+ */
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+	int need_flush = 0;
+
+	/* change_pte notifications are only expected for 4K host PTEs. */
+	WARN_ON(pte_huge(pte));
+
+	if (kvm->arch.direct_mmu_enabled)
+		need_flush |= set_direct_pte_hva(kvm, hva, pte);
+	if (!kvm->arch.pure_direct_mmu)
+		need_flush |= kvm_handle_hva(kvm, hva, (unsigned long)&pte,
+					     kvm_set_pte_rmapp);
+	return need_flush;
 }
 
 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
-- 
2.23.0.444.g18eeb5a265-goog




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux