ioeventfd is a mechanism provided by KVM to receive notifications about reads and writes to PIO and MMIO areas within the guest. Such notifications are useful when all we need to know is that a specific area of memory has been changed, and we don't need a heavyweight exit to happen.

The implementation uses epoll to scale to a large number of ioeventfds.

Signed-off-by: Sasha Levin <levinsasha928@xxxxxxxxx>
---
 tools/kvm/Makefile                |    1 +
 tools/kvm/include/kvm/ioeventfd.h |   27 ++++++++
 tools/kvm/ioeventfd.c             |  127 +++++++++++++++++++++++++++++++++++++
 tools/kvm/kvm-run.c               |    4 +
 4 files changed, 159 insertions(+), 0 deletions(-)
 create mode 100644 tools/kvm/include/kvm/ioeventfd.h
 create mode 100644 tools/kvm/ioeventfd.c

diff --git a/tools/kvm/Makefile b/tools/kvm/Makefile
index 2ebc86c..e7ceb5c 100644
--- a/tools/kvm/Makefile
+++ b/tools/kvm/Makefile
@@ -48,6 +48,7 @@ OBJS += irq.o
 OBJS += rbtree.o
 OBJS += util/rbtree-interval.o
 OBJS += virtio/9p.o
+OBJS += ioeventfd.o
 
 FLAGS_BFD=$(CFLAGS) -lbfd
 
diff --git a/tools/kvm/include/kvm/ioeventfd.h b/tools/kvm/include/kvm/ioeventfd.h
new file mode 100644
index 0000000..fa57b41
--- /dev/null
+++ b/tools/kvm/include/kvm/ioeventfd.h
@@ -0,0 +1,27 @@
+#ifndef KVM__IOEVENTFD_H
+#define KVM__IOEVENTFD_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <sys/eventfd.h>
+
+struct kvm;
+
+struct ioevent {
+	u64			start;
+	u8			len;
+	void			(*ioevent_callback_fn)(struct kvm *kvm, void *ptr);
+	struct kvm		*kvm;
+	void			*ptr;
+	int			event_fd;
+	u64			datamatch;
+
+	struct list_head	list_used;
+};
+
+void ioeventfd__init(void);
+void ioeventfd__start(void);
+void ioeventfd__add_event(struct kvm *kvm, struct ioevent *ioevent);
+void ioeventfd__del_event(struct kvm *kvm, u64 start, u64 datamatch);
+
+#endif
diff --git a/tools/kvm/ioeventfd.c b/tools/kvm/ioeventfd.c
new file mode 100644
index 0000000..5444432
--- /dev/null
+++ b/tools/kvm/ioeventfd.c
@@ -0,0 +1,127 @@
+#include <sys/epoll.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <signal.h>
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/types.h>
+
+#include "kvm/ioeventfd.h"
+#include "kvm/kvm.h"
+#include "kvm/util.h"
+
+#define IOEVENTFD_MAX_EVENTS	20
+
+static struct epoll_event events[IOEVENTFD_MAX_EVENTS];
+static int epoll_fd;
+static LIST_HEAD(used_ioevents);
+
+void ioeventfd__init(void)
+{
+	epoll_fd = epoll_create(IOEVENTFD_MAX_EVENTS);
+	if (epoll_fd < 0)
+		die("Failed creating epoll fd");
+}
+
+void ioeventfd__add_event(struct kvm *kvm, struct ioevent *ioevent)
+{
+	struct kvm_ioeventfd kvm_ioevent;
+	struct epoll_event epoll_event;
+	struct ioevent *new_ioevent;
+	int event;
+
+	new_ioevent = malloc(sizeof(*new_ioevent));
+	if (new_ioevent == NULL)
+		die("Failed allocating memory for new ioevent");
+
+	*new_ioevent = *ioevent;
+	event = new_ioevent->event_fd;
+
+	kvm_ioevent = (struct kvm_ioeventfd) {
+		.addr		= ioevent->start,
+		.len		= ioevent->len,
+		.datamatch	= ioevent->datamatch,
+		.fd		= event,
+		.flags		= KVM_IOEVENTFD_FLAG_PIO | KVM_IOEVENTFD_FLAG_DATAMATCH,
+	};
+
+	if (ioctl(kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent) != 0)
+		die("Failed creating new ioeventfd");
+
+	epoll_event = (struct epoll_event) {
+		.events		= EPOLLIN,
+		.data.ptr	= new_ioevent,
+	};
+
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event, &epoll_event) != 0)
+		die("Failed assigning new event to the epoll fd");
+
+	list_add_tail(&new_ioevent->list_used, &used_ioevents);
+}
+
+void ioeventfd__del_event(struct kvm *kvm, u64 start, u64 datamatch)
+{
+	struct kvm_ioeventfd kvm_ioevent;
+	struct ioevent *ioevent;
+	u8 found = 0;
+
+	list_for_each_entry(ioevent, &used_ioevents, list_used) {
+		if (ioevent->start == start) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0 || ioevent == NULL)
+		return;
+
+	kvm_ioevent = (struct kvm_ioeventfd) {
+		.addr		= ioevent->start,
+		.len		= ioevent->len,
+		.datamatch	= ioevent->datamatch,
+		.flags		= KVM_IOEVENTFD_FLAG_PIO
+				| KVM_IOEVENTFD_FLAG_DEASSIGN
+				| KVM_IOEVENTFD_FLAG_DATAMATCH,
+	};
+
+	ioctl(kvm->vm_fd, KVM_IOEVENTFD, &kvm_ioevent);
+
+	epoll_ctl(epoll_fd, EPOLL_CTL_DEL, ioevent->event_fd, NULL);
+
+	list_del(&ioevent->list_used);
+
+	close(ioevent->event_fd);
+	free(ioevent);
+}
+
+static void *ioeventfd__thread(void *param)
+{
+	for (;;) {
+		int nfds, i;
+
+		nfds = epoll_wait(epoll_fd, events, IOEVENTFD_MAX_EVENTS, -1);
+		for (i = 0; i < nfds; i++) {
+			u64 tmp;
+			struct ioevent *ioevent;
+
+			ioevent = events[i].data.ptr;
+
+			if (read(ioevent->event_fd, &tmp, sizeof(tmp)) < 0)
+				die("Failed reading event");
+
+			ioevent->ioevent_callback_fn(ioevent->kvm, ioevent->ptr);
+		}
+	}
+
+	return NULL;
+}
+
+void ioeventfd__start(void)
+{
+	pthread_t thread;
+
+	pthread_create(&thread, NULL, ioeventfd__thread, NULL);
+}
diff --git a/tools/kvm/kvm-run.c b/tools/kvm/kvm-run.c
index f384ddd..48b8e70 100644
--- a/tools/kvm/kvm-run.c
+++ b/tools/kvm/kvm-run.c
@@ -29,6 +29,7 @@
 #include <kvm/symbol.h>
 #include <kvm/virtio-9p.h>
 #include <kvm/vesa.h>
+#include <kvm/ioeventfd.h>
 
 /* header files for gitish interface */
 #include <kvm/kvm-run.h>
@@ -505,6 +506,8 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
 
 	kvm = kvm__init(kvm_dev, ram_size);
 
+	ioeventfd__init();
+
 	max_cpus = kvm__max_cpus(kvm);
 
 	if (nrcpus > max_cpus) {
@@ -612,6 +615,7 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
 		vesa__init(kvm);
 
 	thread_pool__init(nr_online_cpus);
+	ioeventfd__start();
 
 	for (i = 0; i < nrcpus; i++) {
 		if (pthread_create(&kvm_cpus[i]->thread, NULL, kvm_cpu_thread, kvm_cpus[i]) != 0)
--
1.7.5.rc3
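
For reference, here is a minimal sketch of how a device model could hook into this interface. It is not part of the patch: the PIO port, length, datamatch value, and the my_device_* names are made up for illustration, and error handling is kept to a minimum.

#include <sys/eventfd.h>

#include "kvm/ioeventfd.h"
#include "kvm/kvm.h"
#include "kvm/util.h"

/* Runs on the ioeventfd thread when the guest writes the matching value. */
static void my_device_notify(struct kvm *kvm, void *ptr)
{
	/* Kick the device's work here, e.g. process a virtio queue. */
}

static void my_device_setup_ioeventfd(struct kvm *kvm)
{
	struct ioevent ioevent;
	int fd;

	fd = eventfd(0, 0);
	if (fd < 0)
		die("Failed creating eventfd");

	ioevent = (struct ioevent) {
		.start			= 0x1000,	/* hypothetical PIO port */
		.len			= 2,		/* width of the guest write */
		.datamatch		= 1,		/* value the write must carry */
		.ioevent_callback_fn	= my_device_notify,
		.kvm			= kvm,
		.ptr			= NULL,		/* opaque pointer passed to the callback */
		.event_fd		= fd,
	};

	/* Registers the range with KVM and adds the eventfd to the epoll set. */
	ioeventfd__add_event(kvm, &ioevent);
}

The struct is copied internally by ioeventfd__add_event(), so a stack-allocated ioevent like the one above is sufficient on the caller's side; the registration can later be torn down with ioeventfd__del_event() using the same start address and datamatch value.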