During configuration of the BAR addresses, a Linux guest disables and
enables access to I/O and memory space. When access is disabled, we
don't stop emulating the memory regions described by the BARs. Now that
we have callbacks for activating and deactivating emulation for a BAR
region, let's use that to stop emulation when access is disabled, and
re-activate it when access is re-enabled.

The vesa emulation hasn't been designed with toggling on and off in
mind, so refuse writes to the PCI command register that disable memory
or IO access.

Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
---
 hw/vesa.c | 16 ++++++++++++++++
 pci.c     | 42 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/hw/vesa.c b/hw/vesa.c
index 74ebebbefa6b..3044a86078fb 100644
--- a/hw/vesa.c
+++ b/hw/vesa.c
@@ -81,6 +81,18 @@ static int vesa__bar_deactivate(struct kvm *kvm,
 	return -EINVAL;
 }
 
+static void vesa__pci_cfg_write(struct kvm *kvm, struct pci_device_header *pci_hdr,
+				u8 offset, void *data, int sz)
+{
+	u32 value;
+
+	if (offset == PCI_COMMAND) {
+		memcpy(&value, data, sz);
+		value |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+		memcpy(data, &value, sz);
+	}
+}
+
 struct framebuffer *vesa__init(struct kvm *kvm)
 {
 	struct vesa_dev *vdev;
@@ -114,6 +126,10 @@ struct framebuffer *vesa__init(struct kvm *kvm)
 		.bar_size[1]	= VESA_MEM_SIZE,
 	};
 
+	vdev->pci_hdr.cfg_ops = (struct pci_config_operations) {
+		.write	= vesa__pci_cfg_write,
+	};
+
 	vdev->fb = (struct framebuffer) {
 		.width	= VESA_WIDTH,
 		.height	= VESA_HEIGHT,
diff --git a/pci.c b/pci.c
index 5412f2defa2e..98331a1fc205 100644
--- a/pci.c
+++ b/pci.c
@@ -157,6 +157,42 @@ static struct ioport_operations pci_config_data_ops = {
 	.io_out	= pci_config_data_out,
 };
 
+static void pci_config_command_wr(struct kvm *kvm,
+				  struct pci_device_header *pci_hdr,
+				  u16 new_command)
+{
+	int i;
+	bool toggle_io, toggle_mem;
+
+	toggle_io = (pci_hdr->command ^ new_command) & PCI_COMMAND_IO;
+	toggle_mem = (pci_hdr->command ^ new_command) & PCI_COMMAND_MEMORY;
+
+	for (i = 0; i < 6; i++) {
+		if (!pci_bar_is_implemented(pci_hdr, i))
+			continue;
+
+		if (toggle_io && pci__bar_is_io(pci_hdr, i)) {
+			if (__pci__io_space_enabled(new_command))
+				pci_hdr->bar_activate_fn(kvm, pci_hdr, i,
+							 pci_hdr->data);
+			else
+				pci_hdr->bar_deactivate_fn(kvm, pci_hdr, i,
+							   pci_hdr->data);
+		}
+
+		if (toggle_mem && pci__bar_is_memory(pci_hdr, i)) {
+			if (__pci__memory_space_enabled(new_command))
+				pci_hdr->bar_activate_fn(kvm, pci_hdr, i,
+							 pci_hdr->data);
+			else
+				pci_hdr->bar_deactivate_fn(kvm, pci_hdr, i,
+							   pci_hdr->data);
+		}
+	}
+
+	pci_hdr->command = new_command;
+}
+
 void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data, int size)
 {
 	void *base;
@@ -182,6 +218,12 @@ void pci__config_wr(struct kvm *kvm, union pci_config_address addr, void *data,
 	if (*(u32 *)(base + offset) == 0)
 		return;
 
+	if (offset == PCI_COMMAND) {
+		memcpy(&value, data, size);
+		pci_config_command_wr(kvm, pci_hdr, (u16)value);
+		return;
+	}
+
 	bar = (offset - PCI_BAR_OFFSET(0)) / sizeof(u32);
 
 	/*
-- 
2.20.1