[RFC PATCH 4/4] KVM: SEV: Use a bitmap module param to decide whether a cache flush is needed during the guest memory reclaim

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Use a bitmap to provide flexibility in deciding whether a flush is
needed for a specific mmu notifier event. The cache flush during memory
reclamation was originally introduced to address cache incoherency
issues on some SME_COHERENT platforms. Users may configure the bitmap
depending on the hardware (e.g. no flush needed when SME_COHERENT
extends to DMA devices) or on the userspace VMM (e.g. no flush needed
when the VMM ensures guest memory is properly unpinned).

The bitmap is also decoupled from the mmu_notifier_event type to
provide a consistent interface. The parameter keeps the same behavior
regardless of changes to the mmu notifier events in future kernel
versions. When a new mmu notifier event is added, it will default to
BIT(0) so that no additional cache flush is accidentally introduced.

Signed-off-by: Jacky Li <jackyli@xxxxxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
---
 arch/x86/kvm/svm/sev.c       | 47 +++++++++++++++++++++++++++++++++---
 include/linux/mmu_notifier.h |  4 +++
 2 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 477df8a06629..6e7530b4ae5d 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -65,6 +65,47 @@ module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
 #define sev_es_debug_swap_enabled false
 #endif /* CONFIG_KVM_AMD_SEV */
 
+#define MMU_NOTIFY_OTHERS_BIT BIT_ULL(0)
+#define MMU_NOTIFY_UNMAP_BIT BIT_ULL(1)
+#define MMU_NOTIFY_CLEAR_BIT BIT_ULL(2)
+#define MMU_NOTIFY_PROTECTION_VMA_BIT BIT_ULL(3)
+#define MMU_NOTIFY_PROTECTION_PAGE_BIT BIT_ULL(4)
+#define MMU_NOTIFY_SOFT_DIRTY_BIT BIT_ULL(5)
+#define MMU_NOTIFY_RELEASE_BIT BIT_ULL(6)
+#define MMU_NOTIFY_MIGRATE_BIT BIT_ULL(7)
+#define MMU_NOTIFY_EXCLUSIVE_BIT BIT_ULL(8)
+
+/*
+ * Explicitly decouple from the mmu_notifier_event enum, so that the interface
+ * (i.e. bit definitions in the module param bitmap) remains the same when the
+ * original enum gets updated.
+ */
+static const int mmu_notifier_event_map[NR_MMU_NOTIFY_EVENTS] = {
+	[MMU_NOTIFY_UNMAP] = MMU_NOTIFY_UNMAP_BIT,
+	[MMU_NOTIFY_CLEAR] = MMU_NOTIFY_CLEAR_BIT,
+	[MMU_NOTIFY_PROTECTION_VMA] = MMU_NOTIFY_PROTECTION_VMA_BIT,
+	[MMU_NOTIFY_PROTECTION_PAGE] = MMU_NOTIFY_PROTECTION_PAGE_BIT,
+	[MMU_NOTIFY_SOFT_DIRTY] = MMU_NOTIFY_SOFT_DIRTY_BIT,
+	[MMU_NOTIFY_RELEASE] = MMU_NOTIFY_RELEASE_BIT,
+	[MMU_NOTIFY_MIGRATE] = MMU_NOTIFY_MIGRATE_BIT,
+	[MMU_NOTIFY_EXCLUSIVE] = MMU_NOTIFY_EXCLUSIVE_BIT
+};
+unsigned long flush_on_mmu_notifier_event_bitmap = MMU_NOTIFY_UNMAP_BIT |
+	MMU_NOTIFY_CLEAR_BIT | MMU_NOTIFY_RELEASE_BIT | MMU_NOTIFY_MIGRATE_BIT;
+EXPORT_SYMBOL_GPL(flush_on_mmu_notifier_event_bitmap);
+module_param(flush_on_mmu_notifier_event_bitmap, ulong, 0644);
+MODULE_PARM_DESC(flush_on_mmu_notifier_event_bitmap,
+"Whether a cache flush is needed when the sev guest memory is reclaimed with a specific mmu notifier event.\n"
+"\tBit 0 (0x01)  left to any event not yet defined in the map\n"
+"\tBit 1 (0x02)  corresponds to MMU_NOTIFY_UNMAP event\n"
+"\tBit 2 (0x04)  corresponds to MMU_NOTIFY_CLEAR event\n"
+"\tBit 3 (0x08)  corresponds to MMU_NOTIFY_PROTECTION_VMA event\n"
+"\tBit 4 (0x10)  corresponds to MMU_NOTIFY_PROTECTION_PAGE event\n"
+"\tBit 5 (0x20)  corresponds to MMU_NOTIFY_SOFT_DIRTY event\n"
"\tBit 6 (0x40)  corresponds to MMU_NOTIFY_RELEASE event\n"
"\tBit 7 (0x80)  corresponds to MMU_NOTIFY_MIGRATE event\n"
"\tBit 8 (0x100) corresponds to MMU_NOTIFY_EXCLUSIVE event");
+
 static u8 sev_enc_bit;
 static DECLARE_RWSEM(sev_deactivate_lock);
 static DEFINE_MUTEX(sev_bitmap_lock);
@@ -2335,10 +2376,8 @@ void sev_guest_memory_reclaimed(struct kvm *kvm,
 	if (!sev_guest(kvm))
 		return;
 
-	if (mmu_notifier_event == MMU_NOTIFY_UNMAP ||
-	    mmu_notifier_event == MMU_NOTIFY_CLEAR ||
-	    mmu_notifier_event == MMU_NOTIFY_RELEASE ||
-	    mmu_notifier_event == MMU_NOTIFY_MIGRATE)
+	if (mmu_notifier_event_map[mmu_notifier_event] &
+	    flush_on_mmu_notifier_event_bitmap)
 		wbinvd_on_all_cpus();
 }
 
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index f349e08a9dfe..b40db51d76a4 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -47,6 +47,9 @@ struct mmu_interval_notifier;
  * longer have exclusive access to the page. When sent during creation of an
  * exclusive range the owner will be initialised to the value provided by the
  * caller of make_device_exclusive_range(), otherwise the owner will be NULL.
+ *
+ * @NR_MMU_NOTIFY_EVENTS: number of mmu notifier events, should always be at
+ * the end of the enum list.
  */
 enum mmu_notifier_event {
 	MMU_NOTIFY_UNMAP = 0,
@@ -57,6 +60,7 @@ enum mmu_notifier_event {
 	MMU_NOTIFY_RELEASE,
 	MMU_NOTIFY_MIGRATE,
 	MMU_NOTIFY_EXCLUSIVE,
+	NR_MMU_NOTIFY_EVENTS,
 };
 
 #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
-- 
2.43.0.rc0.421.g78406f8d94-goog





[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux