On 02/02/21 19:57, Ben Gardon wrote:
Add a read/write lock to be used in place of the MMU spinlock on x86.
The rwlock will enable the TDP MMU to handle page faults and other
operations in parallel in future commits.
Reviewed-by: Peter Feiner <pfeiner@xxxxxxxxxx>
Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
v1 -> v2
- Removed MMU lock wrappers
- Completely replaced the MMU spinlock with an rwlock for x86
 arch/x86/include/asm/kvm_host.h |  2 +
 arch/x86/kvm/mmu/mmu.c          | 90 ++++++++++++++++-----------------
 arch/x86/kvm/mmu/page_track.c   |  8 +--
 arch/x86/kvm/mmu/paging_tmpl.h  |  8 +--
 arch/x86/kvm/mmu/tdp_mmu.c      | 20 ++++----
 arch/x86/kvm/x86.c              |  4 +-
 include/linux/kvm_host.h        |  5 ++
 virt/kvm/dirty_ring.c           | 10 ++++
 virt/kvm/kvm_main.c             | 46 +++++++++++------
 9 files changed, 112 insertions(+), 81 deletions(-)
Let's create a new header file to abstract this even more.
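For reference, the arch-side opt-in that these macros key off of is just a define plus the lock-type selection in struct kvm. A rough sketch (not the exact hunks from Ben's patch; the struct layout here is only illustrative):

/* arch/x86/include/asm/kvm_host.h: x86 opts in to the rwlock. */
#define KVM_HAVE_MMU_RWLOCK

/* include/linux/kvm_host.h: mmu_lock's type follows the opt-in. */
struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */
	/* ... */
};

With that in place, common code only ever needs the macros below: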
diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 70ba572f6e5c..3eeb8c0e9590 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -9,6 +9,7 @@
 #include <linux/vmalloc.h>
 #include <linux/kvm_dirty_ring.h>
 #include <trace/events/kvm.h>
+#include "mmu_lock.h"
 
 int __weak kvm_cpu_dirty_log_size(void)
 {
@@ -60,19 +61,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
 		return;
 
-#ifdef KVM_HAVE_MMU_RWLOCK
-	write_lock(&kvm->mmu_lock);
-#else
-	spin_lock(&kvm->mmu_lock);
-#endif /* KVM_HAVE_MMU_RWLOCK */
-
+	KVM_MMU_LOCK(kvm);
 	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
-
-#ifdef KVM_HAVE_MMU_RWLOCK
-	write_unlock(&kvm->mmu_lock);
-#else
-	spin_unlock(&kvm->mmu_lock);
-#endif /* KVM_HAVE_MMU_RWLOCK */
+	KVM_MMU_UNLOCK(kvm);
 }
 
 int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6f0c1473b474..356068103f8a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -58,6 +58,7 @@
#include "coalesced_mmio.h"
#include "async_pf.h"
+#include "mmu_lock.h"
#include "vfio.h"
#define CREATE_TRACE_POINTS
@@ -450,14 +451,6 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-#ifdef KVM_HAVE_MMU_RWLOCK
-#define KVM_MMU_LOCK(kvm) write_lock(&kvm->mmu_lock)
-#define KVM_MMU_UNLOCK(kvm) write_unlock(&kvm->mmu_lock)
-#else
-#define KVM_MMU_LOCK(kvm) spin_lock(&kvm->mmu_lock)
-#define KVM_MMU_UNLOCK(kvm) spin_unlock(&kvm->mmu_lock)
-#endif /* KVM_HAVE_MMU_RWLOCK */
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
@@ -755,11 +748,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (!kvm)
 		return ERR_PTR(-ENOMEM);
 
-#ifdef KVM_HAVE_MMU_RWLOCK
-	rwlock_init(&kvm->mmu_lock);
-#else
-	spin_lock_init(&kvm->mmu_lock);
-#endif /* KVM_HAVE_MMU_RWLOCK */
+	KVM_MMU_LOCK_INIT(kvm);
 	mmgrab(current->mm);
 	kvm->mm = current->mm;
 	kvm_eventfd_init(kvm);
diff --git a/virt/kvm/mmu_lock.h b/virt/kvm/mmu_lock.h
new file mode 100644
index 000000000000..1dd1ca2cdc77
--- /dev/null
+++ b/virt/kvm/mmu_lock.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#ifndef KVM_MMU_LOCK_H
+#define KVM_MMU_LOCK_H 1
+
+/*
+ * Architectures can choose whether to use an rwlock or spinlock
+ * for the mmu_lock. These macros, for use in common code
+ * only, avoid using #ifdefs in places that must deal with
+ * multiple architectures.
+ */
+
+#ifdef KVM_HAVE_MMU_RWLOCK
+#define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
+#define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
+#define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
+#else
+#define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
+#define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
+#define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
+#endif /* KVM_HAVE_MMU_RWLOCK */
+
+#endif
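
With mmu_lock.h in place, a common-code path can take the lock without caring which lock type the architecture picked. A minimal illustration (hypothetical function, not part of the patch, mirroring the kvm_reset_dirty_gfn hunk above):

#include "mmu_lock.h"

/* Hypothetical example of a common-code path that must hold mmu_lock. */
static void kvm_do_something_locked(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);	/* write_lock() on x86, spin_lock() elsewhere */
	/* ... touch MMU state ... */
	KVM_MMU_UNLOCK(kvm);	/* matching write_unlock()/spin_unlock() */
}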