Re: [PATCH] kvm tools, vesa: Use guest-mapped memory for framebuffer

On 06.06.2011, at 15:51, Pekka Enberg wrote:

> This patch converts hw/vesa.c to use guest-mapped memory for the framebuffer
> and drops the slow MMIO emulation, which speeds up framebuffer accesses
> considerably. Note that this can be optimized further with the
> KVM_GET_DIRTY_LOG ioctl(), as explained by Alexander Graf.
> 
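For context, a minimal sketch of the KVM_GET_DIRTY_LOG optimization referred
to above. It assumes the framebuffer slot is registered with the
KVM_MEM_LOG_DIRTY_PAGES flag set in kvm_userspace_memory_region.flags and
that the caller has the VM file descriptor at hand; vesa__fb_dirty() and its
parameters are illustrative names, not part of this patch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

#include "kvm/vesa.h"		/* for VESA_MEM_SIZE; assumed location */

#define VESA_FB_PAGES	(VESA_MEM_SIZE / 4096)
#define BITMAP_LONGS	((VESA_FB_PAGES + 8 * sizeof(unsigned long) - 1) / \
			 (8 * sizeof(unsigned long)))

static unsigned long vesa_dirty_bitmap[BITMAP_LONGS];

/*
 * Returns non-zero if the guest touched the framebuffer since the last
 * call.  KVM clears its internal dirty bitmap on every KVM_GET_DIRTY_LOG,
 * so each call reports only the writes made since the previous one.
 */
static int vesa__fb_dirty(int vm_fd, unsigned int slot)
{
	struct kvm_dirty_log dirty_log = {
		.slot		= slot,
		.dirty_bitmap	= vesa_dirty_bitmap,
	};
	unsigned int i;

	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &dirty_log) < 0)
		return 1;	/* be conservative: repaint */

	for (i = 0; i < BITMAP_LONGS; i++)
		if (vesa_dirty_bitmap[i])
			return 1;

	return 0;
}

The display refresh loop could then skip repainting entirely whenever no
framebuffer page is flagged dirty.
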
> Cc: Alexander Graf <agraf@xxxxxxx>
> Cc: Cyrill Gorcunov <gorcunov@xxxxxxxxx>
> Cc: Ingo Molnar <mingo@xxxxxxx>
> Cc: John Floren <john@xxxxxxxxxxx>
> Cc: Sasha Levin <levinsasha928@xxxxxxxxx>
> Signed-off-by: Pekka Enberg <penberg@xxxxxxxxxx>
> ---
> tools/kvm/hw/vesa.c         |   17 +++++------------
> tools/kvm/include/kvm/kvm.h |    1 +
> tools/kvm/kvm.c             |    8 ++++----
> 3 files changed, 10 insertions(+), 16 deletions(-)
> 
> diff --git a/tools/kvm/hw/vesa.c b/tools/kvm/hw/vesa.c
> index 48d31ce..be9c109 100644
> --- a/tools/kvm/hw/vesa.c
> +++ b/tools/kvm/hw/vesa.c
> @@ -8,6 +8,7 @@
> #include "kvm/irq.h"
> #include "kvm/kvm.h"
> #include "kvm/pci.h"
> +#include <sys/mman.h>
> 
> #include <sys/types.h>
> #include <sys/ioctl.h>
> @@ -40,14 +41,6 @@ static struct pci_device_header vesa_pci_device = {
> 	.bar[1]			= VESA_MEM_ADDR | PCI_BASE_ADDRESS_SPACE_MEMORY,
> };
> 
> -static void vesa_mmio_callback(u64 addr, u8 *data, u32 len, u8 is_write)
> -{
> -	if (!is_write)
> -		return;
> -
> -	fb__write(addr, data, len);
> -}
> -
> static struct framebuffer vesafb;
> 
> struct framebuffer *vesa__init(struct kvm *kvm)
> @@ -65,12 +58,12 @@ struct framebuffer *vesa__init(struct kvm *kvm)
> 	vesa_pci_device.bar[0]		= vesa_base_addr | PCI_BASE_ADDRESS_SPACE_IO;
> 	pci__register(&vesa_pci_device, dev);
> 
> -	kvm__register_mmio(kvm, VESA_MEM_ADDR, VESA_MEM_SIZE, &vesa_mmio_callback);
> -
> -	mem = calloc(1, VESA_MEM_SIZE);
> -	if (!mem)
> +	mem = mmap(NULL, VESA_MEM_SIZE, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
> +	if (mem == MAP_FAILED)
> 		return NULL;
> 
> +	kvm__register_mem_slot(kvm, 1, VESA_MEM_ADDR, VESA_MEM_SIZE, mem);

Slot 1 here ...

> +
> 	vesafb = (struct framebuffer) {
> 		.width			= VESA_WIDTH,
> 		.height			= VESA_HEIGHT,
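
PROT_RW and MAP_ANON_NORESERVE above are tools/kvm shorthands rather than
standard <sys/mman.h> names; presumably they expand to something like the
definitions below (an assumption -- check the tree). The practical difference
from the old calloc() is that MAP_NORESERVE avoids committing backing store
for the whole framebuffer up front, while anonymous mappings are zero-filled
anyway, so the zeroing behaviour is preserved.

#include <sys/mman.h>

/* Assumed expansions of the tools/kvm convenience macros: */
#ifndef PROT_RW
#define PROT_RW			(PROT_READ | PROT_WRITE)
#endif
#ifndef MAP_ANON_NORESERVE
#define MAP_ANON_NORESERVE	(MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE)
#endif
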
> diff --git a/tools/kvm/include/kvm/kvm.h b/tools/kvm/include/kvm/kvm.h
> index 55551de..0628402 100644
> --- a/tools/kvm/include/kvm/kvm.h
> +++ b/tools/kvm/include/kvm/kvm.h
> @@ -49,6 +49,7 @@ void kvm__stop_timer(struct kvm *kvm);
> void kvm__irq_line(struct kvm *kvm, int irq, int level);
> bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
> bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
> +void kvm__register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr);
> bool kvm__register_mmio(struct kvm *kvm, u64 phys_addr, u64 phys_addr_len, void (*kvm_mmio_callback_fn)(u64 addr, u8 *data, u32 len, u8 is_write));
> bool kvm__deregister_mmio(struct kvm *kvm, u64 phys_addr);
> void kvm__pause(void);
> diff --git a/tools/kvm/kvm.c b/tools/kvm/kvm.c
> index 54e3203..de642c7 100644
> --- a/tools/kvm/kvm.c
> +++ b/tools/kvm/kvm.c
> @@ -162,7 +162,7 @@ static bool kvm__cpu_supports_vm(void)
> 	return regs.ecx & (1 << feature);
> }
> 
> -static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
> +void kvm__register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64 size, void *userspace_addr)
> {
> 	struct kvm_userspace_memory_region mem;
> 	int ret;
> @@ -200,7 +200,7 @@ void kvm__init_ram(struct kvm *kvm)
> 		phys_size  = kvm->ram_size;
> 		host_mem   = kvm->ram_start;
> 
> -		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> +		kvm__register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> 	} else {
> 		/* First RAM range from zero to the PCI gap: */
> 
> @@ -208,7 +208,7 @@ void kvm__init_ram(struct kvm *kvm)
> 		phys_size  = KVM_32BIT_GAP_START;
> 		host_mem   = kvm->ram_start;
> 
> -		kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> +		kvm__register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
> 
> 		/* Second RAM range from 4GB to the end of RAM: */
> 
> @@ -216,7 +216,7 @@ void kvm__init_ram(struct kvm *kvm)
> 		phys_size  = kvm->ram_size - phys_size;
> 		host_mem   = kvm->ram_start + phys_start;
> 
> -		kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
> +		kvm__register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);

... and slot 1 again here. Is this intentional, or could the second registration overwrite the first?
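
The body of kvm__register_mem_slot() is not part of this diff, but presumably
it is a thin wrapper around the KVM_SET_USER_MEMORY_REGION ioctl, roughly
like the sketch below (the vm_fd field name and the error handling are
assumptions). The relevant point for the question above is that KVM
identifies memory regions by their slot number, so issuing the ioctl twice
with slot 1 reconfigures the existing slot rather than creating a second
region.

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#include "kvm/kvm.h"	/* struct kvm; the vm_fd field name is assumed */

void kvm__register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys,
			    u64 size, void *userspace_addr)
{
	struct kvm_userspace_memory_region mem = {
		.slot			= slot,
		.guest_phys_addr	= guest_phys,
		.memory_size		= size,
		.userspace_addr		= (unsigned long)userspace_addr,
	};

	if (ioctl(kvm->vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0) {
		perror("KVM_SET_USER_MEMORY_REGION");
		exit(1);
	}
}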


Alex
