The PCI Local Bus Specification, Rev. 3.0, Section 6.2.5.1, "Address Maps",
states: "Devices that map control functions into I/O Space must not consume
more than 256 bytes per I/O Base Address register."

Yet all the PCI devices currently allocate IO port regions of IOPORT_SIZE
(= 1024 bytes) for their IO BARs.

Fix this by having PCI devices use 256-byte regions (PCI_IO_SIZE) for their
IO BARs.

Signed-off-by: Julien Thierry <julien.thierry@xxxxxxx>
---
 hw/pci-shmem.c       |  4 ++--
 hw/vesa.c            |  4 ++--
 include/kvm/ioport.h |  1 -
 pci.c                |  2 +-
 virtio/pci.c         | 14 +++++++-------
 5 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/hw/pci-shmem.c b/hw/pci-shmem.c
index a0c5ba8..2e1474b 100644
--- a/hw/pci-shmem.c
+++ b/hw/pci-shmem.c
@@ -357,8 +357,8 @@ int pci_shmem__init(struct kvm *kvm)
 		return 0;
 
 	/* Register MMIO space for MSI-X */
-	r = pci_get_io_port_block(IOPORT_SIZE);
-	r = ioport__register(kvm, r, &shmem_pci__io_ops, IOPORT_SIZE, NULL);
+	r = pci_get_io_port_block(PCI_IO_SIZE);
+	r = ioport__register(kvm, r, &shmem_pci__io_ops, PCI_IO_SIZE, NULL);
 	if (r < 0)
 		return r;
 	ivshmem_registers = (u16)r;
diff --git a/hw/vesa.c b/hw/vesa.c
index 404a8a3..71935d5 100644
--- a/hw/vesa.c
+++ b/hw/vesa.c
@@ -60,8 +60,8 @@ struct framebuffer *vesa__init(struct kvm *kvm)
 	if (!kvm->cfg.vnc && !kvm->cfg.sdl && !kvm->cfg.gtk)
 		return NULL;
 
-	r = pci_get_io_space_block(IOPORT_SIZE);
-	r = ioport__register(kvm, r, &vesa_io_ops, IOPORT_SIZE, NULL);
+	r = pci_get_io_space_block(PCI_IO_SIZE);
+	r = ioport__register(kvm, r, &vesa_io_ops, PCI_IO_SIZE, NULL);
 	if (r < 0)
 		return ERR_PTR(r);
 
diff --git a/include/kvm/ioport.h b/include/kvm/ioport.h
index b10fcd5..8c86b71 100644
--- a/include/kvm/ioport.h
+++ b/include/kvm/ioport.h
@@ -14,7 +14,6 @@
 
 /* some ports we reserve for own use */
 #define IOPORT_DBG			0xe0
-#define IOPORT_SIZE			0x400
 
 struct kvm;
 
diff --git a/pci.c b/pci.c
index cd749db..228a628 100644
--- a/pci.c
+++ b/pci.c
@@ -23,7 +23,7 @@ static u16 io_port_blocks = PCI_IOPORT_START;
 
 u16 pci_get_io_port_block(u32 size)
 {
-	u16 port = ALIGN(io_port_blocks, IOPORT_SIZE);
+	u16 port = ALIGN(io_port_blocks, PCI_IO_SIZE);
 	io_port_blocks = port + size;
 	return port;
 }
diff --git a/virtio/pci.c b/virtio/pci.c
index c8e16dd..5a6c0d0 100644
--- a/virtio/pci.c
+++ b/virtio/pci.c
@@ -406,7 +406,7 @@ static void virtio_pci__io_mmio_callback(struct kvm_cpu *vcpu,
 {
 	struct virtio_pci *vpci = ptr;
 	int direction = is_write ? KVM_EXIT_IO_OUT : KVM_EXIT_IO_IN;
-	u16 port = vpci->port_addr + (addr & (IOPORT_SIZE - 1));
+	u16 port = vpci->port_addr + (addr & (PCI_IO_SIZE - 1));
 
 	kvm__emulate_io(vcpu, port, data, direction, len, 1);
 }
@@ -420,14 +420,14 @@ int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
 	vpci->kvm = kvm;
 	vpci->dev = dev;
 
-	r = pci_get_io_port_block(IOPORT_SIZE);
-	r = ioport__register(kvm, r, &virtio_pci__io_ops, IOPORT_SIZE, vdev);
+	r = pci_get_io_port_block(PCI_IO_SIZE);
+	r = ioport__register(kvm, r, &virtio_pci__io_ops, PCI_IO_SIZE, vdev);
 	if (r < 0)
 		return r;
 	vpci->port_addr = (u16)r;
 
-	vpci->mmio_addr = pci_get_io_space_block(IOPORT_SIZE);
-	r = kvm__register_mmio(kvm, vpci->mmio_addr, IOPORT_SIZE, false,
+	vpci->mmio_addr = pci_get_io_space_block(PCI_IO_SIZE);
+	r = kvm__register_mmio(kvm, vpci->mmio_addr, PCI_IO_SIZE, false,
 			       virtio_pci__io_mmio_callback, vpci);
 	if (r < 0)
 		goto free_ioport;
@@ -457,8 +457,8 @@ int virtio_pci__init(struct kvm *kvm, void *dev, struct virtio_device *vdev,
 					| PCI_BASE_ADDRESS_SPACE_MEMORY),
 		.status			= cpu_to_le16(PCI_STATUS_CAP_LIST),
 		.capabilities		= (void *)&vpci->pci_hdr.msix - (void *)&vpci->pci_hdr,
-		.bar_size[0]		= cpu_to_le32(IOPORT_SIZE),
-		.bar_size[1]		= cpu_to_le32(IOPORT_SIZE),
+		.bar_size[0]		= cpu_to_le32(PCI_IO_SIZE),
+		.bar_size[1]		= cpu_to_le32(PCI_IO_SIZE),
 		.bar_size[2]		= cpu_to_le32(PCI_IO_SIZE*2),
 	};
 
--
1.9.1

_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
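
A side note on where the 256-byte limit quoted in the commit message comes
from: a guest sizes a BAR by writing all-ones to it and reading the value
back, and the bits that remain zero encode the size of the decoded region.
For an I/O BAR the spec caps that region at 256 bytes, which is why the BARs
are sized with PCI_IO_SIZE (assumed here to be 0x100 in kvmtool's
include/kvm/pci.h) instead of IOPORT_SIZE (0x400). The standalone sketch
below is not part of the patch; pci_bar_size() is a hypothetical helper, and
the mask macros are defined locally with the standard PCI values (matching
Linux's pci_regs.h).

/*
 * Illustrative sketch only, not part of the patch above: how a BAR's
 * decoded size follows from the value read back after writing ~0 to it.
 */
#include <stdint.h>
#include <stdio.h>

/* Standard PCI BAR bit definitions (values as in Linux's pci_regs.h). */
#define PCI_BASE_ADDRESS_SPACE_IO	0x01
#define PCI_BASE_ADDRESS_IO_MASK	(~0x03U)
#define PCI_BASE_ADDRESS_MEM_MASK	(~0x0fU)

/* Size decoded by a BAR, given the value read back after writing ~0. */
static uint32_t pci_bar_size(uint32_t probed)
{
	uint32_t mask;

	if (probed & PCI_BASE_ADDRESS_SPACE_IO)
		mask = probed & PCI_BASE_ADDRESS_IO_MASK;
	else
		mask = probed & PCI_BASE_ADDRESS_MEM_MASK;

	return ~mask + 1;
}

int main(void)
{
	/* A 256-byte I/O BAR reads back as 0xffffff01: prints 256. */
	printf("I/O BAR size: %u bytes\n", pci_bar_size(0xffffff01));

	/*
	 * A 1024-byte I/O BAR (the old IOPORT_SIZE) would read back as
	 * 0xfffffc01, exceeding the 256 bytes allowed per I/O BAR.
	 */
	printf("I/O BAR size: %u bytes\n", pci_bar_size(0xfffffc01));

	return 0;
}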