Re: [PATCH v3 3/6] KVM: arm64: Enable ring-based dirty memory tracking

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hi Marc,

On 9/25/22 6:27 AM, Marc Zyngier wrote:
On Thu, 22 Sep 2022 01:32:11 +0100,
Gavin Shan <gshan@xxxxxxxxxx> wrote:

This enables the ring-based dirty memory tracking on ARM64. The
feature is configured by CONFIG_HAVE_KVM_DIRTY_RING, detected and
enabled by KVM_CAP_DIRTY_LOG_RING. A ring buffer is created on every
VCPU when the feature is enabled. Each entry in the ring buffer is
described by 'struct kvm_dirty_gfn'.

A ring buffer entry is pushed when a page becomes dirty on host,
and pulled by userspace after the ring buffer is mapped at physical
page offset KVM_DIRTY_LOG_PAGE_OFFSET. The specific VCPU is forced
to exit if its ring buffer becomes softly full. Besides, the ring
buffer can be reset by ioctl command KVM_RESET_DIRTY_RINGS to release
those pulled ring buffer entries.

I think you can cut this message short. This description was useful
when the feature was initially merged, but this is only a "plumb the
damn thing" patch.


Agreed. I will modify the changelog in v4 accordingly, thanks.


Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
  Documentation/virt/kvm/api.rst    | 2 +-
  arch/arm64/include/uapi/asm/kvm.h | 1 +
  arch/arm64/kvm/Kconfig            | 1 +
  arch/arm64/kvm/arm.c              | 8 ++++++++
  4 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index abd7c32126ce..19fa1ac017ed 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8022,7 +8022,7 @@ regardless of what has actually been exposed through the CPUID leaf.
  8.29 KVM_CAP_DIRTY_LOG_RING
  ---------------------------
-:Architectures: x86
+:Architectures: x86, arm64
  :Parameters: args[0] - size of the dirty log ring
KVM is capable of tracking dirty memory using ring buffers that are
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 316917b98707..a7a857f1784d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -43,6 +43,7 @@
  #define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_REG_SIZE(id) \
  	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 815cc118c675..0309b2d0f2da 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -32,6 +32,7 @@ menuconfig KVM
  	select KVM_VFIO
  	select HAVE_KVM_EVENTFD
  	select HAVE_KVM_IRQFD
+	select HAVE_KVM_DIRTY_RING
  	select HAVE_KVM_MSI
  	select HAVE_KVM_IRQCHIP
  	select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2ff0ef62abad..76816f8e082b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -747,6 +747,14 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
  			return kvm_vcpu_suspend(vcpu);
+
+		if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
+		    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+			kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
+			vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+			trace_kvm_dirty_ring_exit(vcpu);
+			return 0;
+		}

This is *very* similar to the x86 code. Could we move it to common
code? Something like the diff below, to be for most of it squashed
into patch #1.


Sure, the additional changes make sense to me. I will fold them to
PATCH[v4 1/6].

Thanks,
Gavin


diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 76816f8e082b..93a16cdbe163 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -748,13 +748,8 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
  		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
  			return kvm_vcpu_suspend(vcpu);
-		if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
-		    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
-			kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
-			vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
-			trace_kvm_dirty_ring_exit(vcpu);
+		if (kvm_dirty_ring_check_request(vcpu))
  			return 0;
-		}
  	}
return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eb7d0d7654bb..48f2519b1db7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10249,11 +10249,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_request_pending(vcpu)) {
  		/* Forbid vmenter if vcpu dirty ring is soft-full */
-		if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
-		    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
-			kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
-			vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
-			trace_kvm_dirty_ring_exit(vcpu);
+		if (kvm_dirty_ring_check_request(vcpu)) {
  			r = 0;
  			goto out;
  		}
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 8c6755981c9b..6e484220adc0 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -64,11 +64,6 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
  {
  }
-static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
-{
-	return true;
-}
-
  #else /* CONFIG_HAVE_KVM_DIRTY_RING */
int kvm_cpu_dirty_log_size(void);
@@ -91,7 +86,7 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
  struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
#endif /* CONFIG_HAVE_KVM_DIRTY_RING */

diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c
index 69c8c90d489d..436d7cded5bf 100644
--- a/virt/kvm/dirty_ring.c
+++ b/virt/kvm/dirty_ring.c
@@ -26,7 +26,7 @@ static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
  	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
  }
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
+static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
  {
  	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
  }
@@ -182,3 +182,16 @@ void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
  	vfree(ring->dirty_gfns);
  	ring->dirty_gfns = NULL;
  }
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
+{
+	if (kvm_check_request(KVM_REQ_RING_SOFT_FULL, vcpu) &&
+	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+		kvm_make_request(KVM_REQ_RING_SOFT_FULL, vcpu);
+		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+		trace_kvm_dirty_ring_exit(vcpu);
+		return true;
+	}
+
+	return false;
+}


_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm



[Index of Archives]     [Linux KVM]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux