On Sun, Nov 13, 2011 at 8:56 PM,  <zanghongyong@xxxxxxxxxx> wrote:
> From: Hongyong Zang <zanghongyong@xxxxxxxxxx>
>
> Ivshmem (nahanni) is a mechanism for sharing host memory with VMs running
> on the same host. Currently, the guest notifies qemu by reading or writing
> the ivshmem device's PCI MMIO BAR0 (Doorbell).
>
> This patch changes this PCI MMIO BAR0 (Doorbell) to PIO, and we find the
> guest accesses the PIO BAR about 30% faster than the MMIO BAR.

Nice work :)

> Test it with:
> Writing the PCI BAR0 DOORBELL register 5,000,000 times, we got the total
> time as follows:
> linux command #time:
>        MMIO(regular interrupt)   PIO(regular interrupt)   MMIO(msi+ioeventfd)   PIO(msi+ioeventfd)
> real   101.441s                  68.863s                  70.720s               49.521s
> user     0.391s                   0.305s                   0.404s                0.340s
> sys     46.308s                  30.634s                  38.740s               27.559s

Did you pin the VMs to cores?

You're sending between 50,000 and 100,000 notifications per second; did you
confirm that they are all being received?  Since eventfds do not buffer,
some may be lost at that rate (see the small eventfd demo appended below
the patch).  Of course, one would expect a single notification to be faster
based on these results, but I'm just curious.

Do you know of any issues with mapping a PIO region to user-space with the
UIO driver framework?  (A rough sketch of the guest-side doorbell access in
both cases is also appended below the patch.)

Thanks,
Cam

> Signed-off-by: Hongyong Zang <zanghongyong@xxxxxxxxxx>
> ---
>  hw/ivshmem.c |   26 +++++++++++++-------------
>  kvm-all.c    |   23 +++++++++++++++++++++++
>  kvm.h        |    1 +
>  3 files changed, 37 insertions(+), 13 deletions(-)
>
> diff --git a/hw/ivshmem.c b/hw/ivshmem.c
> index 242fbea..e68d0a7 100644
> --- a/hw/ivshmem.c
> +++ b/hw/ivshmem.c
> @@ -28,7 +28,7 @@
>  #define IVSHMEM_PEER 0
>  #define IVSHMEM_MASTER 1
>
> -#define IVSHMEM_REG_BAR_SIZE 0x100
> +#define IVSHMEM_REG_BAR_SIZE 0x10
>
>  //#define DEBUG_IVSHMEM
>  #ifdef DEBUG_IVSHMEM
> @@ -56,9 +56,9 @@ typedef struct IVShmemState {
>
>      CharDriverState **eventfd_chr;
>      CharDriverState *server_chr;
> -    MemoryRegion ivshmem_mmio;
> +    MemoryRegion ivshmem_pio;
>
> -    pcibus_t mmio_addr;
> +    pcibus_t pio_addr;
>      /* We might need to register the BAR before we actually have the memory.
>       * So prepare a container MemoryRegion for the BAR immediately and
>       * add a subregion when we have the memory.
> @@ -234,7 +234,7 @@ static uint64_t ivshmem_io_read(void *opaque, target_phys_addr_t addr,
>      return ret;
>  }
>
> -static const MemoryRegionOps ivshmem_mmio_ops = {
> +static const MemoryRegionOps ivshmem_pio_ops = {
>      .read = ivshmem_io_read,
>      .write = ivshmem_io_write,
>      .endianness = DEVICE_NATIVE_ENDIAN,
> @@ -346,8 +346,8 @@ static void close_guest_eventfds(IVShmemState *s, int posn)
>      guest_curr_max = s->peers[posn].nb_eventfds;
>
>      for (i = 0; i < guest_curr_max; i++) {
> -        kvm_set_ioeventfd_mmio_long(s->peers[posn].eventfds[i],
> -                    s->mmio_addr + DOORBELL, (posn << 16) | i, 0);
> +        kvm_set_ioeventfd_pio_long(s->peers[posn].eventfds[i],
> +                    s->pio_addr + DOORBELL, (posn << 16) | i, 0);
>          close(s->peers[posn].eventfds[i]);
>      }
>
> @@ -361,7 +361,7 @@ static void setup_ioeventfds(IVShmemState *s) {
>
>      for (i = 0; i <= s->max_peer; i++) {
>          for (j = 0; j < s->peers[i].nb_eventfds; j++) {
> -            memory_region_add_eventfd(&s->ivshmem_mmio,
> +            memory_region_add_eventfd(&s->ivshmem_pio,
>                                        DOORBELL,
>                                        4,
>                                        true,
> @@ -491,7 +491,7 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
>      }
>
>      if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
> -        if (kvm_set_ioeventfd_mmio_long(incoming_fd, s->mmio_addr + DOORBELL,
> +        if (kvm_set_ioeventfd_pio_long(incoming_fd, s->pio_addr + DOORBELL,
>                          (incoming_posn << 16) | guest_max_eventfd, 1) < 0) {
>              fprintf(stderr, "ivshmem: ioeventfd not available\n");
>          }
> @@ -656,16 +656,16 @@ static int pci_ivshmem_init(PCIDevice *dev)
>
>      s->shm_fd = 0;
>
> -    memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
> -                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
> +    memory_region_init_io(&s->ivshmem_pio, &ivshmem_pio_ops, s,
> +                          "ivshmem-pio", IVSHMEM_REG_BAR_SIZE);
>
>      if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
>          setup_ioeventfds(s);
>      }
>
>      /* region for registers*/
> -    pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
> -                     &s->ivshmem_mmio);
> +    pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
> +                     &s->ivshmem_pio);
>
>      memory_region_init(&s->bar, "ivshmem-bar2-container", s->ivshmem_size);
>
> @@ -741,7 +741,7 @@ static int pci_ivshmem_uninit(PCIDevice *dev)
>  {
>      IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
>
> -    memory_region_destroy(&s->ivshmem_mmio);
> +    memory_region_destroy(&s->ivshmem_pio);
>      memory_region_del_subregion(&s->bar, &s->ivshmem);
>      memory_region_destroy(&s->ivshmem);
>      memory_region_destroy(&s->bar);
> diff --git a/kvm-all.c b/kvm-all.c
> index 5d500e1..737c2e2 100644
> --- a/kvm-all.c
> +++ b/kvm-all.c
> @@ -1396,6 +1396,29 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign
>      return 0;
>  }
>
> +int kvm_set_ioeventfd_pio_long(int fd, uint32_t addr, uint32_t val, bool assign)
> +{
> +    struct kvm_ioeventfd kick = {
> +        .datamatch = val,
> +        .addr = addr,
> +        .len = 4,
> +        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
> +        .fd = fd,
> +    };
> +    int r;
> +    if (!kvm_enabled()) {
> +        return -ENOSYS;
> +    }
> +    if (!assign) {
> +        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
> +    }
> +    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
> +    if (r < 0) {
> +        return r;
> +    }
> +    return 0;
> +}
> +
>  int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
>  {
>      struct kvm_ioeventfd kick = {
> diff --git a/kvm.h b/kvm.h
> index b15e1dd..c2373c9 100644
> --- a/kvm.h
> +++ b/kvm.h
> @@ -198,6 +198,7 @@ int kvm_set_ioeventfd_mmio_long(int fd, uint32_t adr, uint32_t val, bool assign)
>
>  int kvm_set_irqfd(int gsi, int fd, bool assigned);
>
> +int kvm_set_ioeventfd_pio_long(int fd, uint32_t adr, uint32_t val, bool assign);
>  int kvm_set_ioeventfd_pio_word(int fd, uint16_t adr, uint16_t val, bool assign);
>
>  typedef struct KVMMsiMessage {
> --
> 1.7.1
>
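A note on the eventfd question above: an eventfd keeps a 64-bit counter, not
a queue, so doorbells that arrive before the receiver gets to run are folded
into a single readable value, i.e. one wakeup rather than N.  A minimal
stand-alone demo of that coalescing behaviour (not part of the patch, purely
illustrative):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int efd = eventfd(0, 0);
    uint64_t one = 1, got;

    /* Two back-to-back "doorbell rings" before anyone reads. */
    write(efd, &one, sizeof(one));
    write(efd, &one, sizeof(one));

    /* A single read returns 2: both rings were counted, but they
     * produce only one wakeup on the receiving side. */
    read(efd, &got, sizeof(got));
    printf("coalesced count: %llu\n", (unsigned long long)got);

    close(efd);
    return 0;
}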
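And on the guest-side access pattern the numbers above are measuring: with
BAR0 as an I/O BAR the doorbell becomes a single OUT instruction, versus a
32-bit store to a mapped page in the MMIO case.  A rough user-space sketch,
assuming BAR0 has already been located; the port base below is made up, and
DOORBELL is the existing ivshmem doorbell offset (12):

#include <stdint.h>
#include <sys/io.h>             /* iopl(), outl() -- x86 Linux only */

#define DOORBELL        12      /* doorbell register offset in BAR0 */
#define PIO_BAR0_BASE   0xc000  /* example I/O port base, e.g. from lspci */

/* Ring the doorbell through the new PIO BAR: one OUT instruction, matched
 * by the ioeventfd's datamatch of (peer << 16) | vector. */
static void ring_doorbell_pio(uint16_t peer, uint16_t vector)
{
    outl(((uint32_t)peer << 16) | vector, PIO_BAR0_BASE + DOORBELL);
}

/* The pre-patch path: a 32-bit store into the memory-mapped BAR0
 * (bar0 would come from mmap()ing the BAR, e.g. via a UIO mapping). */
static void ring_doorbell_mmio(volatile uint32_t *bar0,
                               uint16_t peer, uint16_t vector)
{
    bar0[DOORBELL / 4] = ((uint32_t)peer << 16) | vector;
}

int main(void)
{
    if (iopl(3) < 0)             /* need I/O privilege for outl() */
        return 1;
    ring_doorbell_pio(1, 0);     /* notify peer 1, vector 0 */
    return 0;
}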