[Qemu-devel] [PATCH] ivshmem: fix guest unable to start with ioeventfd

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



From: Hongyong Zang <zanghongyong@xxxxxxxxxx>

When a guest boots with ioeventfd enabled, qemu crashes with a segmentation fault (backtrace captured with gdb):
  Program received signal SIGSEGV, Segmentation fault.
  0x00000000006009cc in setup_ioeventfds (s=0x171dc40)
      at /home/louzhengwei/git_source/qemu-kvm/hw/ivshmem.c:363
  363             for (j = 0; j < s->peers[i].nb_eventfds; j++) {
The bug is caused by dereferencing s->peers, which is still NULL at device
initialization time: it is only allocated later, when peer information arrives
from the ivshmem server.

This patch replaces the old kvm_set_ioeventfd_mmio_long() calls with the memory
region API. With this change, memory_region_add_eventfd() is called from
ivshmem_read() instead, i.e. only once qemu has actually received the eventfd
information from the ivshmem server, so s->peers is guaranteed to be valid.

Signed-off-by: Hongyong Zang <zanghongyong@xxxxxxxxxx>
---
 hw/ivshmem.c |   41 ++++++++++++++---------------------------
 1 files changed, 14 insertions(+), 27 deletions(-)

diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 242fbea..be26f03 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -58,7 +58,6 @@ typedef struct IVShmemState {
     CharDriverState *server_chr;
     MemoryRegion ivshmem_mmio;
 
-    pcibus_t mmio_addr;
     /* We might need to register the BAR before we actually have the memory.
      * So prepare a container MemoryRegion for the BAR immediately and
      * add a subregion when we have the memory.
@@ -346,8 +345,14 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
     guest_curr_max = s->peers[posn].nb_eventfds;
 
     for (i = 0; i < guest_curr_max; i++) {
-        kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
-                    s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
+        if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
+            memory_region_del_eventfd(&s->ivshmem_mmio,
+                                     DOORBELL,
+                                     4,
+                                     true,
+                                     (posn << 16) | i,
+                                     s->peers[posn].eventfds[i]);
+        }
         close(s->peers[posn].eventfds[i]);
     }
 
@@ -355,22 +360,6 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
     s->peers[posn].nb_eventfds = 0;
 }
 
-static void setup_ioeventfds(IVShmemState *s) {
-
-    int i, j;
-
-    for (i = 0; i <= s->max_peer; i++) {
-        for (j = 0; j < s->peers[i].nb_eventfds; j++) {
-            memory_region_add_eventfd(&s->ivshmem_mmio,
-                                      DOORBELL,
-                                      4,
-                                      true,
-                                      (i << 16) | j,
-                                      s->peers[i].eventfds[j]);
-        }
-    }
-}
-
 /* this function increase the dynamic storage need to store data about other
  * guests */
 static void increase_dynamic_storage(IVShmemState *s, int new_min_size) {
@@ -491,10 +480,12 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
     }
 
     if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
-        if (kvm_set_ioeventfd_mmio_long(incoming_fd, s->mmio_addr + DOORBELL,
-                        (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
-            fprintf(stderr, "ivshmem: ioeventfd not available\n");
-        }
+        memory_region_add_eventfd(&s->ivshmem_mmio,
+                                  DOORBELL,
+                                  4,
+                                  true,
+                                  (incoming_posn << 16) | guest_max_eventfd,
+                                  incoming_fd);
     }
 
     return;
@@ -659,10 +650,6 @@ static int pci_ivshmem_init(PCIDevice *dev)
     memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
                           "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
 
-    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
-        setup_ioeventfds(s);
-    }
-
     /* region for registers*/
     pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                      &s->ivshmem_mmio);
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux