Re: [PATCH 12/13] KVM: x86: add KVM_MEM_X86_SMRAM memory slot flag


 



On 04/30/2015 02:36 PM, Paolo Bonzini wrote:
This adds an arch-specific memslot flag that hides slots unless the
VCPU is in system management mode.

Some care is needed in order to limit the overhead of x86_gfn_to_memslot
when compared with gfn_to_memslot.  Thankfully, we have __gfn_to_memslot
and search_memslots which are the same, so we can add some extra output
to search_memslots.  The compiler will optimize it as dead code in
__gfn_to_memslot, and will use it to thread jumps in x86_gfn_to_memslot.
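
As an aside, the trick is easier to see in isolation.  A self-contained sketch, using made-up names rather than the KVM ones, of why the explicit "found" output lets the caller's filter fold away:

#include <stdbool.h>
#include <stddef.h>

struct slot { unsigned long base_gfn, npages, flags; };

/* Stand-in for search_memslots(): success is reported through *found. */
static inline __attribute__((always_inline)) struct slot *
lookup(struct slot *s, int n, unsigned long gfn, bool *found)
{
	for (int i = 0; i < n; i++) {
		if (gfn >= s[i].base_gfn && gfn < s[i].base_gfn + s[i].npages) {
			*found = true;
			return &s[i];
		}
	}
	*found = false;
	return NULL;
}

/* Stand-in for x86_gfn_to_memslot(): after inlining, "found" is a constant
 * on every return path, so the filter below vanishes on the !found path. */
struct slot *lookup_filtered(struct slot *s, int n, unsigned long gfn,
			     unsigned long hide_flag, bool in_smm)
{
	bool found;
	struct slot *slot = lookup(s, n, gfn, &found);

	if (found && (slot->flags & hide_flag) && !in_smm)
		return NULL;
	return slot;
}

Testing the returned pointer instead ("if (slot && ...)") keeps the extra branch, because with -fno-delete-null-pointer-checks the compiler may not assume &s[i] is non-NULL.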

struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
  {
-	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
+	/* By using search_memslots directly the compiler can optimize away
+	 * the "if (found)" check below.
+	 *
+	 * It cannot do the same for gfn_to_memslot because it is not inlined,
+	 * and it also cannot do the same for __gfn_to_memslot because the
+	 * kernel is compiled with -fno-delete-null-pointer-checks.
+	 */
+	bool found;
+	struct kvm_memslots *memslots = kvm_memslots(vcpu->kvm);
+	struct kvm_memory_slot *slot = search_memslots(memslots, gfn, &found);
+
+	if (found && unlikely(slot->flags & KVM_MEM_X86_SMRAM) && !is_smm(vcpu))
+		return NULL;

Alternative approach: store the memslot pointer in the vcpu, instead of in vcpu->kvm. The uapi interface code can generate two memslot tables to be stored in the kvm struct, one for smm and one for !smm.

This is a little more generic and can be used for other causes of asymmetric memory maps (the only other one I can think of, mapping the APIC to different physical addresses, can't be supported efficiently).
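
Roughly what I have in mind (just a sketch; KVM_AS_SMM, vcpu_memslots() and the memslots[] array are invented names, nothing like them exists yet):

/* Sketch only: two memslot tables hung off struct kvm, picked per vcpu. */
enum { KVM_AS_NORMAL, KVM_AS_SMM, KVM_NR_AS };

/* in struct kvm:	struct kvm_memslots *memslots[KVM_NR_AS]; */

static inline struct kvm_memslots *vcpu_memslots(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->memslots[is_smm(vcpu) ? KVM_AS_SMM : KVM_AS_NORMAL];
}

struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	/* No per-slot SMRAM flag to test; the SMM table simply contains
	 * (or omits) the slots that should be visible in SMM. */
	return __gfn_to_memslot(vcpu_memslots(vcpu), gfn);
}

The memslot ioctls would then populate both tables, duplicating the slots that are visible in both modes.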

  	return slot;
  }
@@ -112,7 +125,15 @@ EXPORT_SYMBOL_GPL(x86_read_guest);
  int x86_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
  			      gpa_t gpa, unsigned long len)
  {
-	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
+	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
+
+	if (r < 0)
+		return r;
+
+	/* Use slow path for reads and writes to SMRAM.  */
+	if (ghc->memslot && (ghc->memslot->flags & KVM_MEM_X86_SMRAM))
+		ghc->memslot = NULL;
+	return r;
  }
  EXPORT_SYMBOL_GPL(x86_gfn_to_hva_cache_init);
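
For context, clearing ghc->memslot is enough because the generic cached accessors already fall back to the slow path when it is NULL, along these lines (abridged from memory, not the exact upstream code):

int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	/* No cached memslot/hva: redo the full lookup on every access. */
	if (unlikely(!ghc->memslot))
		return kvm_write_guest(kvm, ghc->gpa, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	return __copy_to_user((void __user *)ghc->hva, data, len) ? -EFAULT : 0;
}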
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e7cfee09970a..ea3db5d89f67 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -800,16 +800,18 @@ static inline void kvm_guest_exit(void)
   * gfn_to_memslot() itself isn't here as an inline because that would
   * bloat other code too much.
   */
-static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+static __always_inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool *found)
  {
  	int start = 0, end = slots->used_slots;
  	int slot = atomic_read(&slots->lru_slot);
  	struct kvm_memory_slot *memslots = slots->memslots;

  	if (gfn >= memslots[slot].base_gfn &&
-	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages) {
+		*found = true;
  		return &memslots[slot];
+	}

  	while (start < end) {
  		slot = start + (end - start) / 2;
@@ -823,16 +825,20 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
  	if (gfn >= memslots[start].base_gfn &&
  	    gfn < memslots[start].base_gfn + memslots[start].npages) {
  		atomic_set(&slots->lru_slot, start);
+		*found = true;
  		return &memslots[start];
  	}

+	*found = false;
  	return NULL;
  }

  static inline struct kvm_memory_slot *
  __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
  {
-	return search_memslots(slots, gfn);
+	bool found;
+
+	return search_memslots(slots, gfn, &found);
  }

  static inline unsigned long
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e82c46b9860f..b8b76106a003 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -716,6 +716,10 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
  #ifdef __KVM_HAVE_READONLY_MEM
  	valid_flags |= KVM_MEM_READONLY;
  #endif
+#ifdef __KVM_ARCH_VALID_FLAGS
+	BUILD_BUG_ON(__KVM_ARCH_VALID_FLAGS & KVM_MEMSLOT_INVALID);
+	valid_flags |= __KVM_ARCH_VALID_FLAGS;
+#endif

  	if (mem->flags & ~valid_flags)
  		return -EINVAL;
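
Presumably an earlier hunk in the series adds the x86 side of this, something like the following (a guess at the shape, including the bit value; not quoted from the patch):

/* arch/x86 headers, sketched from the flag name used above */
#define KVM_MEM_X86_SMRAM	(1UL << 24)	/* exact bit value is a guess */
#define __KVM_ARCH_VALID_FLAGS	KVM_MEM_X86_SMRAM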




