[PATCH] kvm: Flush coalesced MMIO buffer periodically

The default behaviour of coalesced MMIO is to cache writes in the shared ring buffer (sketched below) until:
1. The buffer is full, or
2. The vcpu exits to QEmu for some other reason.
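
(For reference only, not part of this patch: the ring shared between the kernel and
QEmu looks roughly like the following, mirroring the definitions in <linux/kvm.h>
at the time.)

    struct kvm_coalesced_mmio {
            __u64 phys_addr;        /* guest-physical address written to */
            __u32 len;              /* number of bytes written */
            __u32 pad;
            __u8  data[8];          /* the written data */
    };

    struct kvm_coalesced_mmio_ring {
            __u32 first;            /* next entry for userspace to consume */
            __u32 last;             /* next entry the kernel will fill */
            struct kvm_coalesced_mmio coalesced_mmio[0];
    };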

But this can delay writes for a long time when:
1. Each MMIO write is small.
2. The interval between writes is long.
3. The guest rarely needs input or access to other devices.

This issue was observed on an experimental embedded system. The test image
simply prints "test" every second. The output under QEmu meets expectations,
but under KVM it is delayed for several seconds.

Per Avi's suggestion, this patch flushes the coalesced MMIO buffer periodically
from the QEmu I/O thread. This way, we don't need an explicit vcpu exit to QEmu
to handle this issue. The current flush interval is 1/25 s.

Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
---
 qemu-kvm.c |   47 +++++++++++++++++++++++++++++++++++++++++++++--
 qemu-kvm.h |    2 ++
 2 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/qemu-kvm.c b/qemu-kvm.c
index 599c3d6..38f890c 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -463,6 +463,12 @@ static void kvm_create_vcpu(CPUState *env, int id)
         goto err_fd;
     }
 
+#ifdef KVM_CAP_COALESCED_MMIO
+    if (kvm_state->coalesced_mmio && !kvm_state->coalesced_mmio_ring)
+        kvm_state->coalesced_mmio_ring = (void *) env->kvm_run +
+		kvm_state->coalesced_mmio * PAGE_SIZE;
+#endif
+
     return;
   err_fd:
     close(env->kvm_fd);
@@ -927,8 +933,7 @@ int kvm_run(CPUState *env)
 
 #if defined(KVM_CAP_COALESCED_MMIO)
     if (kvm_state->coalesced_mmio) {
-        struct kvm_coalesced_mmio_ring *ring =
-            (void *) run + kvm_state->coalesced_mmio * PAGE_SIZE;
+        struct kvm_coalesced_mmio_ring *ring = kvm_state->coalesced_mmio_ring;
         while (ring->first != ring->last) {
             cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
                            &ring->coalesced_mmio[ring->first].data[0],
@@ -2073,6 +2078,29 @@ static void io_thread_wakeup(void *opaque)
     }
 }
 
+#ifdef KVM_CAP_COALESCED_MMIO
+
+/* flush interval is 1/25 second */
+#define KVM_COALESCED_MMIO_FLUSH_INTERVAL    40000000LL
+
+static void flush_coalesced_mmio_buffer(void *opaque)
+{
+    if (kvm_state->coalesced_mmio_ring) {
+        struct kvm_coalesced_mmio_ring *ring =
+            kvm_state->coalesced_mmio_ring;
+        while (ring->first != ring->last) {
+            cpu_physical_memory_rw(ring->coalesced_mmio[ring->first].phys_addr,
+                           &ring->coalesced_mmio[ring->first].data[0],
+                           ring->coalesced_mmio[ring->first].len, 1);
+            smp_wmb();
+            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
+        }
+    }
+    qemu_mod_timer(kvm_state->coalesced_mmio_timer,
+           qemu_get_clock(host_clock) + KVM_COALESCED_MMIO_FLUSH_INTERVAL);
+}
+#endif
+
 int kvm_main_loop(void)
 {
     int fds[2];
@@ -2117,6 +2145,15 @@ int kvm_main_loop(void)
     io_thread_sigfd = sigfd;
     cpu_single_env = NULL;
 
+#ifdef KVM_CAP_COALESCED_MMIO
+    if (kvm_state->coalesced_mmio) {
+        kvm_state->coalesced_mmio_timer =
+            qemu_new_timer(host_clock, flush_coalesced_mmio_buffer, NULL);
+        qemu_mod_timer(kvm_state->coalesced_mmio_timer,
+            qemu_get_clock(host_clock) + KVM_COALESCED_MMIO_FLUSH_INTERVAL);
+    }
+#endif
+
     while (1) {
         main_loop_wait(1000);
         if (qemu_shutdown_requested()) {
@@ -2135,6 +2172,12 @@ int kvm_main_loop(void)
         }
     }
 
+#ifdef KVM_CAP_COALESCED_MMIO
+    if (kvm_state->coalesced_mmio) {
+        qemu_del_timer(kvm_state->coalesced_mmio_timer);
+        qemu_free_timer(kvm_state->coalesced_mmio_timer);
+    }
+#endif
     pause_all_threads();
     pthread_mutex_unlock(&qemu_mutex);
 
diff --git a/qemu-kvm.h b/qemu-kvm.h
index 6b3e5a1..17f9d1b 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -1144,6 +1144,8 @@ typedef struct KVMState {
     int fd;
     int vmfd;
     int coalesced_mmio;
+    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+    struct QEMUTimer *coalesced_mmio_timer;
     int broken_set_mem_region;
     int migration_log;
     int vcpu_events;
-- 
1.5.4.5
