[PATCH v2] kvm tools, vesa: Use guest-mapped memory for framebuffer

This patch converts hw/vesa.c to use guest-mapped memory for the
framebuffer and drops the slow MMIO emulation. This speeds up framebuffer
accesses considerably. Note that this can be optimized further with the
KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander Graf.
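
For reference, a minimal sketch of that dirty-log approach (not part of this
patch): it assumes the VESA slot is registered with
.flags = KVM_MEM_LOG_DIRTY_PAGES and that the slot index and VM file
descriptor are available; vesa_flush_dirty() and the VESA_MEM_SIZE
placeholder below are illustrative names only.

/*
 * Illustrative sketch only, not part of this patch.  Assumes the VESA
 * slot was registered with .flags = KVM_MEM_LOG_DIRTY_PAGES and that
 * its slot index and the VM file descriptor are known.  VESA_MEM_SIZE
 * is a stand-in for the value the tool defines.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

#define VESA_MEM_SIZE	(1024 * 768 * 4)	/* placeholder size */
#define VESA_PAGE_SIZE	4096
#define VESA_NR_PAGES	((VESA_MEM_SIZE + VESA_PAGE_SIZE - 1) / VESA_PAGE_SIZE)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long vesa_dirty[(VESA_NR_PAGES + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Fetch (and clear) the dirty-page bitmap, then touch only dirty pages. */
static int vesa_flush_dirty(int vm_fd, __u32 vesa_slot)
{
	struct kvm_dirty_log log = {
		.slot		= vesa_slot,
		.dirty_bitmap	= vesa_dirty,
	};
	unsigned long i;

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
		return -1;

	for (i = 0; i < VESA_NR_PAGES; i++) {
		if (vesa_dirty[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))) {
			/* page 'i' of the framebuffer changed: redraw it here */
		}
	}

	return 0;
}

With that in place the display code only has to scan pages the guest
actually wrote since the last update, instead of copying the whole
framebuffer on every refresh.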

Cc: Alexander Graf <agraf@xxxxxxx>
Cc: Cyrill Gorcunov <gorcunov@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: John Floren <john@xxxxxxxxxxx>
Cc: Sasha Levin <levinsasha928@xxxxxxxxx>
Signed-off-by: Pekka Enberg <penberg@xxxxxxxxxx>
---
v1 -> v2: Fix mem slot index passed to KVM_SET_USER_MEMORY_REGION

 tools/kvm/hw/vesa.c         |   17 +++++------------
 tools/kvm/include/kvm/kvm.h |    3 +++
 tools/kvm/kvm.c             |   10 +++++-----
 3 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
index 48d31ce..71322fc 100644
--- a/tools/kvm/hw/vesa.c
+++ b/tools/kvm/hw/vesa.c
@@ -8,6 +8,7 @@
 #include "kvm/irq.h"
 #include "kvm/kvm.h"
 #include "kvm/pci.h"
+#include <sys/mman.h>
 
 #include <sys/types.h>
 #include <sys/ioctl.h>
@@ -40,14 +41,6 @@ static struct pci_device_header vesa_pci_device = {
 	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
 };
 
-static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
-{
-	if (!is_write)
-		return;
-
-	fb__write(addr, data, len);
-}
-
 static struct framebuffer vesafb;
 
 struct framebuffer *vesa__init(struct kvm *kvm)
@@ -65,12 +58,12 @@ struct framebuffer *vesa__init(struct kvm *kvm)
 	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
 	pci__register(&vesa_pci_device, dev);
 
-	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
-
-	mem = calloc(1, VESA_MEM_SIZE);
-	if (!mem)
+	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
+	if (mem == MAP_FAILED)
 		return NULL;
 
+	kvm__register_mem(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);
+
 	vesafb = (struct framebuffer) {
 		.width			= VESA_WIDTH,
 		.height			= VESA_HEIGHT,
diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
index 55551de..17b7557 100644
--- a/tools/kvm/include/kvm/kvm.h
+++ b/tools/kvm/include/kvm/kvm.h
@@ -21,6 +21,8 @@ struct kvm {
 
 	int			nrcpus;		/* Number of cpus to run */
 
+	u32			mem_slots;	/* for KVM_SET_USER_MEMORY_REGION */
+
 	u64			ram_size;
 	void			*ram_start;
 
@@ -49,6 +51,7 @@ void kvm__stop_timer(struct kvm *kvm);
 void kvm__irq_line(struct kvm *kvm, int irq, int level);
 bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
 bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr);
 bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
 bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
 void kvm__pause(void);
diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
index 54e3203..65e94a1 100644
--- a/tools/kvm/kvm.c
+++ b/tools/kvm/kvm.c
@@ -162,13 +162,13 @@ static bool kvm__cpu_supports_vm(void)
 	return regs.ecx & (1 << feature);
 }
 
-static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
+void kvm__register_mem(struct kvm *kvm, u64 guest_phys, u64 size, void *userspace_addr)
 {
 	struct kvm_userspace_memory_region mem;
 	int ret;
 
 	mem = (struct kvm_userspace_memory_region) {
-		.slot			= slot,
+		.slot			= kvm->mem_slots++,
 		.guest_phys_addr	= guest_phys,
 		.memory_size		= size,
 		.userspace_addr		= (unsigned long)userspace_addr,
@@ -200,7 +200,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	} else {
 		/* First RAM range from zero to the PCI gap: */
 
@@ -208,7 +208,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = KVM_32BIT_GAP_START;
 		host_mem   = kvm->ram_start;
 
-		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 
 		/* Second RAM range from 4GB to the end of RAM: */
 
@@ -216,7 +216,7 @@ void kvm__init_ram(struct kvm *kvm)
 		phys_size  = kvm->ram_size - phys_size;
 		host_mem   = kvm->ram_start + phys_start;
 
-		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
+		kvm__register_mem(kvm, phys_start, phys_size, host_mem);
 	}
 }
 
-- 
1.7.0.4
