Re: [PATCH][RFC] post copy chardevice (was Re: [RFC] postcopy livemigration proposal)

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Sample user-land program for testing the post-copy character device.

===========================================================================
/*
 * Sample user-land program for the post-copy vmem character device.
 *
 * Copyright (c) 2011,
 * National Institute of Advanced Industrial Science and Technology
 *
 * https://sites.google.com/site/grivonhome/quick-kvm-migration
 * Author: Isaku Yamahata <yamahata at valinux co jp>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <err.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

//#include <linux/kvm.h>
#define __user
#include "my-kvm.h"

#if 1
#define DPRINTF(format, ...) \
	printf("%s:%d "format, __func__, __LINE__, ## __VA_ARGS__)
#else
#define DPRINTF(format, ...)	do { } while (0)
#endif

#define VMEM_NR_PAGES	8

/*
 * Page server: supplies guest pages on demand.
 *
 * Maps the shared-memory backing store, signals readiness via
 * KVM_VMEM_READY, pre-fills page 0, then loops on
 * KVM_VMEM_GET_PAGE_REQUEST filling each requested page with an
 * incrementing byte pattern and marking it cached, until all
 * size/page_size pages have been served.
 *
 * vmem_fd and shmem_fd are consumed (closed) by this function.
 */
void server(int vmem_fd, int shmem_fd, size_t size, size_t page_size)
{
	int nr_pages = size / page_size;

	/* Use unsigned char * so per-page offset arithmetic is ISO C
	 * (arithmetic on void * is a GNU extension). */
	unsigned char *shmem = mmap(NULL, size, PROT_READ | PROT_WRITE,
				    MAP_SHARED, shmem_fd, 0);
	if (shmem == MAP_FAILED) {
		err(EXIT_FAILURE, "server: mmap(\"shmem\")");
	}
	/* The mapping keeps the object alive; the fd is no longer needed. */
	close(shmem_fd);

	DPRINTF("KVM_VMEM_READY\n");
	if (ioctl(vmem_fd, KVM_VMEM_READY) < 0) {
		err(EXIT_FAILURE, "server: KVM_VMEM_READY");
	}

	struct kvm_vmem_page_request page_request;
	page_request.pgoffs = malloc(sizeof(*page_request.pgoffs) * nr_pages);
	if (page_request.pgoffs == NULL) {
		err(EXIT_FAILURE, "server: malloc(\"page_request.pgoffs\")");
	}

	struct kvm_vmem_page_cached page_cached;
	page_cached.pgoffs = malloc(sizeof(*page_cached.pgoffs) * nr_pages);
	if (page_cached.pgoffs == NULL) {
		err(EXIT_FAILURE, "server: malloc(\"page_cached.pgoffs\")");
	}

	/* Pre-fill page 0 so the client has one page present immediately. */
	int fill = 0;
	fill++;
	memset(shmem, fill, page_size);

	page_cached.nr = 1;
	page_cached.pgoffs[0] = 0;

	DPRINTF("KVM_VMEM_MARK_PAGE_CACHED\n");
	/* Fix: check < 0 like every other ioctl call in this file; the
	 * original tested the raw return value, treating any non-zero
	 * (including a positive success value) as failure. */
	if (ioctl(vmem_fd, KVM_VMEM_MARK_PAGE_CACHED, &page_cached) < 0) {
		err(EXIT_FAILURE, "server: KVM_VMEM_MARK_PAGE_CACHED");
	}

	struct kvm_vmem_page_range page_range = {
		.pgoff = 0,
		.nr_pages = 1,
	};
	struct kvm_vmem_make_pages_present pages_present = {
		.nr = 1,
		.ranges = &page_range,
	};
	DPRINTF("KVM_VMEM_MAKE_PAGES_PRESENT\n");
	if (ioctl(vmem_fd, KVM_VMEM_MAKE_PAGES_PRESENT, &pages_present) < 0) {
		err(EXIT_FAILURE, "server: KVM_VMEM_MAKE_PAGES_PRESENT");
	}

	/* Serve page faults until every page has been delivered. */
	int page_served = 1;
	while (page_served < nr_pages) {
		DPRINTF("KVM_VMEM_GET_PAGE_REQUEST\n");
		page_request.nr = nr_pages;
		/* Fix: same < 0 consistency as above. */
		if (ioctl(vmem_fd, KVM_VMEM_GET_PAGE_REQUEST,
			  &page_request) < 0) {
			err(EXIT_FAILURE, "server: KVM_VMEM_GET_PAGE_REQUEST");
		}

		DPRINTF("request.nr %d\n", page_request.nr);
		page_cached.nr = 0;
		int i;
		for (i = 0; i < page_request.nr; ++i) {
			/* Each served page gets a distinct fill byte so the
			 * client can tell the pages apart. */
			memset(shmem + page_size * page_request.pgoffs[i],
			       fill, page_size);
			fill++;
			page_cached.pgoffs[page_cached.nr] =
				page_request.pgoffs[i];
			page_cached.nr++;
			DPRINTF("request[%d] %lx fill: %d\n",
				i, (unsigned long)page_request.pgoffs[i],
				fill - 1);
		}
		DPRINTF("KVM_VMEM_MARK_PAGE_CACHED\n");
		if (ioctl(vmem_fd, KVM_VMEM_MARK_PAGE_CACHED,
			  &page_cached) < 0) {
			err(EXIT_FAILURE, "server: KVM_VMEM_MARK_PAGE_CACHED");
		}
		page_served += page_cached.nr;
	}

#if 0
	DPRINTF("KVM_VMEM_MAKE_VMA_ANONYMOUS\n");
	if (ioctl(vmem_fd, KVM_VMEM_MAKE_VMA_ANONYMOUS)) {
		err(EXIT_FAILURE, "server: KVM_VMEM_MAKE_VMA_ANONYMOUS");
	}
#endif
	/* Fix: release the request/cached arrays (originally leaked). */
	free(page_cached.pgoffs);
	free(page_request.pgoffs);
	munmap(shmem, size);
	close(vmem_fd);
}

/*
 * Client ("qemu") side: maps the vmem fd, waits for the server's
 * KVM_VMEM_READY, then touches every page in a scattered order so that
 * on-demand page delivery can be observed.  The sleep(1) calls insert
 * pauses mid-sequence to make the demand-paging visible in the logs.
 *
 * vmem_fd is consumed (closed) by this function once the mapping exists.
 */
void qemu(int vmem_fd, size_t size, size_t page_size)
{
	DPRINTF("mmap\n");
	void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE,
			 vmem_fd, 0);
	if (ram == MAP_FAILED) {
		err(EXIT_FAILURE, "qemu: mmap");
	}

	DPRINTF("KVM_VMEM_WAIT_READY\n");
	if (ioctl(vmem_fd, KVM_VMEM_WAIT_READY) < 0) {
		err(EXIT_FAILURE, "qemu: KVM_VMEM_WAIT_READY");
	}
	DPRINTF("close\n");
	close(vmem_fd);

	/* Fix: size the array by VMEM_NR_PAGES so the loop below cannot
	 * read past it if the macro is ever changed.  The order is a
	 * deliberate shuffle covering each page index exactly once. */
	const int pages[VMEM_NR_PAGES] = {7, 1, 6, 2, 0, 5, 3, 4};
	/* Fix: index through a byte pointer; arithmetic on void * is a
	 * GNU extension, not ISO C. */
	const uint8_t *bytes = ram;
	int val[VMEM_NR_PAGES];
	int i;
	for (i = 0; i < VMEM_NR_PAGES; ++i) {
		if (i == 2 || i == 6)
			sleep(1);
		DPRINTF("access to %d\n", pages[i]);
		fflush(stdout);
		val[i] = bytes[page_size * pages[i]];
		DPRINTF("page:%d val[i=%d]=%d\n", pages[i], i, val[i]);
	}

	munmap(ram, size);
}

/*
 * Entry point: creates the vmem device pair via /dev/kvm, then forks —
 * the child becomes the page server, the parent plays the "qemu" role
 * faulting pages in.  Exits non-zero via err() on any setup failure.
 */
int main(int argc, char **argv)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		/* Fix: use err() like every other failure path (was
		 * perror + exit). */
		err(EXIT_FAILURE, "can't open /dev/kvm");
	}

	int vmem_dev_fd = ioctl(kvm_fd, KVM_CREATE_VMEM_DEV);
	if (vmem_dev_fd < 0) {
		err(EXIT_FAILURE, "can't create vmem_dev");
	}
	/* Fix: /dev/kvm is no longer needed once the device exists
	 * (originally leaked). */
	close(kvm_fd);

	long page_size = sysconf(_SC_PAGESIZE);
	/* Fix: sysconf can fail with -1; a negative size would corrupt
	 * every size computation below. */
	if (page_size <= 0) {
		err(EXIT_FAILURE, "sysconf(_SC_PAGESIZE)");
	}
	struct kvm_vmem_create create = {
		.size = VMEM_NR_PAGES * page_size,
	};
	if (ioctl(vmem_dev_fd, KVM_CREATE_VMEM, &create) < 0) {
		err(EXIT_FAILURE, "KVM_CREATE_VMEM");
	}
	close(vmem_dev_fd);

	int vmem_fd = create.vmem_fd;
	int shmem_fd = create.shmem_fd;
	size_t size = create.size;

	/* Give the shared-memory backing store its full size. */
	if (ftruncate(shmem_fd, size) < 0) {
		err(EXIT_FAILURE, "truncate(\"shmem_fd\")");
	}

	printf("vmem_fd %d shmem_fd %d\n", vmem_fd, shmem_fd);
	fflush(stdout);

	pid_t child = fork();
	if (child < 0) {
		err(EXIT_FAILURE, "fork");
	}
	if (child == 0) {
		/* Child: let the parent fault first, then serve pages. */
		sleep(1);
		printf("server pid: %d\n", getpid());
		server(vmem_fd, shmem_fd, size, page_size);
		return 0;
	}

	/* Parent: only the server needs shmem_fd. */
	printf("qemu pid: %d server pid: %d\n", getpid(), child);
	close(shmem_fd);
	qemu(vmem_fd, size, page_size);
	return 0;
}

===========================================================================
-- 
yamahata
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux