Add a virtio pstore device to allow kernel log messages to be saved on
the host.  With this patch, the log files are saved under the directory
given by the --pstore option.

  $ lkvm run --pstore=dir-xx

  (guest) # echo c > /proc/sysrq-trigger

  $ ls dir-xx
  dmesg-0.enc.z  dmesg-1.enc.z

The log files are usually compressed using zlib.  Users can easily read
the messages on the host, or on the guest via the pstore filesystem.

Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Radim Krčmář <rkrcmar@xxxxxxxxxx>
Cc: "Michael S. Tsirkin" <mst@xxxxxxxxxx>
Cc: Anthony Liguori <aliguori@xxxxxxxxxx>
Cc: Anton Vorontsov <anton@xxxxxxxxxx>
Cc: Colin Cross <ccross@xxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Tony Luck <tony.luck@xxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: kvm@xxxxxxxxxxxxxxx
Cc: virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
---
 Makefile                     |   1 +
 builtin-run.c                |   2 +
 include/kvm/kvm-config.h     |   1 +
 include/kvm/virtio-pci-dev.h |   2 +
 include/kvm/virtio-pstore.h  |  31 ++++
 include/linux/virtio_ids.h   |   1 +
 virtio/pstore.c              | 359 +++++++++++++++++++++++++++++++++++++++++++
 7 files changed, 397 insertions(+)
 create mode 100644 include/kvm/virtio-pstore.h
 create mode 100644 virtio/pstore.c

diff --git a/Makefile b/Makefile
index 1f0196f..d7462b9 100644
--- a/Makefile
+++ b/Makefile
@@ -67,6 +67,7 @@ OBJS	+= virtio/net.o
 OBJS	+= virtio/rng.o
 OBJS	+= virtio/balloon.o
 OBJS	+= virtio/pci.o
+OBJS	+= virtio/pstore.o
 OBJS	+= disk/blk.o
 OBJS	+= disk/qcow.o
 OBJS	+= disk/raw.o
diff --git a/builtin-run.c b/builtin-run.c
index 72b878d..08c12dd 100644
--- a/builtin-run.c
+++ b/builtin-run.c
@@ -128,6 +128,8 @@ void kvm_run_set_wrapper_sandbox(void)
 			" rootfs"),					\
 	OPT_STRING('\0', "hugetlbfs", &(cfg)->hugetlbfs_path, "path",	\
 			"Hugetlbfs path"),				\
+	OPT_STRING('\0', "pstore", &(cfg)->pstore_path, "path",	\
+			"pstore data path"),				\
 									\
 	OPT_GROUP("Kernel options:"),					\
 	OPT_STRING('k', "kernel", &(cfg)->kernel_filename, "kernel",	\
diff --git a/include/kvm/kvm-config.h b/include/kvm/kvm-config.h
index 386fa8c..42b7651 100644
--- a/include/kvm/kvm-config.h
+++ b/include/kvm/kvm-config.h
@@ -45,6 +45,7 @@ struct kvm_config {
 	const char *hugetlbfs_path;
 	const char *custom_rootfs_name;
 	const char *real_cmdline;
+	const char *pstore_path;
 	struct virtio_net_params *net_params;
 	bool single_step;
 	bool vnc;
diff --git a/include/kvm/virtio-pci-dev.h b/include/kvm/virtio-pci-dev.h
index 48ae018..4339d94 100644
--- a/include/kvm/virtio-pci-dev.h
+++ b/include/kvm/virtio-pci-dev.h
@@ -15,6 +15,7 @@
 #define PCI_DEVICE_ID_VIRTIO_BLN		0x1005
 #define PCI_DEVICE_ID_VIRTIO_SCSI		0x1008
 #define PCI_DEVICE_ID_VIRTIO_9P			0x1009
+#define PCI_DEVICE_ID_VIRTIO_PSTORE		0x100a
 #define PCI_DEVICE_ID_VESA			0x2000
 #define PCI_DEVICE_ID_PCI_SHMEM			0x0001
 
@@ -34,5 +35,6 @@
 #define PCI_CLASS_RNG				0xff0000
 #define PCI_CLASS_BLN				0xff0000
 #define PCI_CLASS_9P				0xff0000
+#define PCI_CLASS_PSTORE			0xff0000
 
 #endif /* VIRTIO_PCI_DEV_H_ */
diff --git a/include/kvm/virtio-pstore.h b/include/kvm/virtio-pstore.h
new file mode 100644
index 0000000..293ab57
--- /dev/null
+++ b/include/kvm/virtio-pstore.h
@@ -0,0 +1,31 @@
+#ifndef KVM__PSTORE_VIRTIO_H
+#define KVM__PSTORE_VIRTIO_H
+
+struct kvm;
+
+#define VIRTIO_PSTORE_TYPE_UNKNOWN	0
+#define VIRTIO_PSTORE_TYPE_DMESG	1
+
+#define VIRTIO_PSTORE_CMD_NULL		0
+#define VIRTIO_PSTORE_CMD_OPEN		1
+#define VIRTIO_PSTORE_CMD_READ		2
+#define VIRTIO_PSTORE_CMD_WRITE		3
+#define VIRTIO_PSTORE_CMD_ERASE		4
+#define VIRTIO_PSTORE_CMD_CLOSE		5
+
+#define VIRTIO_PSTORE_FL_COMPRESSED	1
+
+struct pstore_hdr {
+	u64 id;
+	u32 flags;
+	u16 cmd;
+	u16 type;
+	u64 time_sec;
+	u32 time_nsec;
+	u32 unused;
+};
+
+int virtio_pstore__init(struct kvm *kvm);
+int virtio_pstore__exit(struct kvm *kvm);
+
+#endif /* KVM__PSTORE_VIRTIO_H */
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 5f60aa4..f34cabc 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -40,5 +40,6 @@
 #define VIRTIO_ID_RPROC_SERIAL	11 /* virtio remoteproc serial link */
 #define VIRTIO_ID_CAIF		12 /* Virtio caif */
 #define VIRTIO_ID_INPUT		18 /* virtio input */
+#define VIRTIO_ID_PSTORE	19 /* virtio pstore */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/virtio/pstore.c b/virtio/pstore.c
new file mode 100644
index 0000000..094e54b
--- /dev/null
+++ b/virtio/pstore.c
@@ -0,0 +1,359 @@
+#include "kvm/virtio-pstore.h"
+
+#include "kvm/virtio-pci-dev.h"
+
+#include "kvm/virtio.h"
+#include "kvm/util.h"
+#include "kvm/kvm.h"
+#include "kvm/threadpool.h"
+#include "kvm/guest_compat.h"
+
+#include <linux/virtio_ring.h>
+
+#include <linux/list.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+#include <linux/kernel.h>
+
+#define NUM_VIRT_QUEUES			1
+#define VIRTIO_PSTORE_QUEUE_SIZE	128
+
+struct pstore_dev_job {
+	struct virt_queue	*vq;
+	struct pstore_dev	*pdev;
+	struct thread_pool__job	job_id;
+};
+
+struct pstore_dev {
+	struct list_head	list;
+	struct virtio_device	vdev;
+
+	int			fd;
+	DIR			*dir;
+
+	/* virtio queue */
+	struct virt_queue	vqs[NUM_VIRT_QUEUES];
+	struct pstore_dev_job	jobs[NUM_VIRT_QUEUES];
+};
+
+static LIST_HEAD(pdevs);
+static int compat_id = -1;
+
+static u8 *get_config(struct kvm *kvm, void *dev)
+{
+	/* Unused */
+	return 0;
+}
+
+static u32 get_host_features(struct kvm *kvm, void *dev)
+{
+	/* Unused */
+	return 0;
+}
+
+static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
+{
+	/* Unused */
+}
+
+static void virtio_pstore_hdr_to_filename(struct kvm *kvm, struct pstore_hdr *hdr,
+					  char *buf, size_t sz)
+{
+	const char *basename;
+
+	switch (hdr->type) {
+	case VIRTIO_PSTORE_TYPE_DMESG:
+		basename = "dmesg";
+		break;
+	default:
+		basename = "unknown";
+		break;
+	}
+
+	snprintf(buf, sz, "%s/%s-%llu%s", kvm->cfg.pstore_path, basename,
+		 hdr->id, hdr->flags & VIRTIO_PSTORE_FL_COMPRESSED ? ".enc.z" : "");
+}
+
+static void virtio_pstore_filename_to_hdr(struct kvm *kvm, struct pstore_hdr *hdr,
+					  char *name, char *buf, size_t sz)
+{
+	size_t len = strlen(name);
+
+	hdr->flags = 0;
+	if (!strncmp(name + len - 6, ".enc.z", 6))
+		hdr->flags |= VIRTIO_PSTORE_FL_COMPRESSED;
+
+	snprintf(buf, sz, "%s/%s", kvm->cfg.pstore_path, name);
+
+	if (!strncmp(name, "dmesg", 5)) {
+		hdr->type = VIRTIO_PSTORE_TYPE_DMESG;
+		name += 5;
+	} else if (!strncmp(name, "unknown", 7)) {
+		hdr->type = VIRTIO_PSTORE_TYPE_UNKNOWN;
+		name += 7;
+	}
+
+	hdr->id = strtoul(name + 1, NULL, 0);
+}
+
+static int virtio_pstore_do_open(struct kvm *kvm, struct pstore_dev *pdev,
+				 struct pstore_hdr *hdr, struct iovec *iov)
+{
+	pdev->dir = opendir(kvm->cfg.pstore_path);
+	if (pdev->dir == NULL)
+		return -errno;
+
+	return 0;
+}
+
+static int virtio_pstore_do_close(struct kvm *kvm, struct pstore_dev *pdev,
+				  struct pstore_hdr *hdr, struct iovec *iov)
+{
+	if (pdev->dir == NULL)
+		return -1;
+
+	closedir(pdev->dir);
+	pdev->dir = NULL;
+
+	return 0;
+}
+
+static ssize_t virtio_pstore_do_write(struct kvm *kvm, struct pstore_dev *pdev,
+				      struct pstore_hdr *hdr, struct iovec *iov)
+{
+	char path[PATH_MAX];
+	FILE *fp;
+	ssize_t len = 0;
+
+	virtio_pstore_hdr_to_filename(kvm, hdr, path, sizeof(path));
+
+	fp = fopen(path, "a");
+	if (fp == NULL)
+		return -1;
+
+	len = fwrite(iov[1].iov_base, iov[1].iov_len, 1, fp);
+	if (len < 0 && errno == EAGAIN)
+		len = 0;
+
+	fclose(fp);
+	return len;
+}
+
+static ssize_t virtio_pstore_do_read(struct kvm *kvm, struct pstore_dev *pdev,
+				     struct pstore_hdr *hdr, struct iovec *iov)
+{
+	char path[PATH_MAX];
+	FILE *fp;
+	ssize_t len = 0;
+	struct stat stbuf;
+	struct dirent *dent;
+
+	if (pdev->dir == NULL)
+		return 0;
+
+	dent = readdir(pdev->dir);
+	while (dent) {
+		if (dent->d_name[0] != '.')
+			break;
+		dent = readdir(pdev->dir);
+	}
+
+	if (dent == NULL)
+		return 0;
+
+	virtio_pstore_filename_to_hdr(kvm, hdr, dent->d_name, path, sizeof(path));
+	if (stat(path, &stbuf) < 0)
+		return -1;
+
+	fp = fopen(path, "r");
+	if (fp == NULL)
+		return -1;
+
+	len = fread(iov[1].iov_base, 1, iov[1].iov_len, fp);
+	if (len < 0 && errno == EAGAIN)
+		len = 0;
+
+	hdr->id = virtio_host_to_guest_u64(pdev->vqs, hdr->id);
+	hdr->flags = virtio_host_to_guest_u32(pdev->vqs, hdr->flags);
+
+	hdr->time_sec = virtio_host_to_guest_u64(pdev->vqs, stbuf.st_ctim.tv_sec);
+	hdr->time_nsec = virtio_host_to_guest_u32(pdev->vqs, stbuf.st_ctim.tv_nsec);
+
+	fclose(fp);
+	return len;
+}
+
+static ssize_t virtio_pstore_do_erase(struct kvm *kvm, struct pstore_dev *pdev,
+				      struct pstore_hdr *hdr, struct iovec *iov)
+{
+	char path[PATH_MAX];
+
+	virtio_pstore_hdr_to_filename(kvm, hdr, path, sizeof(path));
+
+	return unlink(path);
+}
+
+static bool virtio_pstore_do_io_request(struct kvm *kvm, struct pstore_dev *pdev,
+					struct virt_queue *vq)
+{
+	struct iovec iov[VIRTIO_PSTORE_QUEUE_SIZE];
+	struct pstore_hdr *hdr;
+	ssize_t len = 0;
+	u16 out, in, head;
+
+	head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
+
+	hdr = iov[0].iov_base;
+
+	switch (virtio_guest_to_host_u16(vq, hdr->cmd)) {
+	case VIRTIO_PSTORE_CMD_OPEN:
+		len = virtio_pstore_do_open(kvm, pdev, hdr, iov);
+		break;
+	case VIRTIO_PSTORE_CMD_READ:
+		len = virtio_pstore_do_read(kvm, pdev, hdr, iov);
+		break;
+	case VIRTIO_PSTORE_CMD_WRITE:
+		len = virtio_pstore_do_write(kvm, pdev, hdr, iov);
+		break;
+	case VIRTIO_PSTORE_CMD_CLOSE:
+		virtio_pstore_do_close(kvm, pdev, hdr, iov);
+		break;
+	case VIRTIO_PSTORE_CMD_ERASE:
+		len = virtio_pstore_do_erase(kvm, pdev, hdr, iov);
+		break;
+	default:
+		return false;
+	}
+
+	if (len < 0)
+		return false;
+
+	virt_queue__set_used_elem(vq, head, len);
+
+	return true;
+}
+
+static void virtio_pstore_do_io(struct kvm *kvm, void *param)
+{
+	struct pstore_dev_job *job = param;
+	struct virt_queue *vq = job->vq;
+	struct pstore_dev *pdev = job->pdev;
+
+	while (virt_queue__available(vq))
+		virtio_pstore_do_io_request(kvm, pdev, vq);
+
+	pdev->vdev.ops->signal_vq(kvm, &pdev->vdev, vq - pdev->vqs);
+}
+
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 page_size, u32 align,
+		   u32 pfn)
+{
+	struct pstore_dev *pdev = dev;
+	struct virt_queue *queue;
+	struct pstore_dev_job *job;
+	void *p;
+
+	compat__remove_message(compat_id);
+
+	queue = &pdev->vqs[vq];
+	queue->pfn = pfn;
+	p = virtio_get_vq(kvm, queue->pfn, page_size);
+
+	job = &pdev->jobs[vq];
+
+	vring_init(&queue->vring, VIRTIO_PSTORE_QUEUE_SIZE, p, align);
+
+	*job = (struct pstore_dev_job) {
+		.vq = queue,
+		.pdev = pdev,
+	};
+
+	thread_pool__init_job(&job->job_id, kvm, virtio_pstore_do_io, job);
+
+	return 0;
+}
+
+static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+	struct pstore_dev *pdev = dev;
+
+	thread_pool__do_job(&pdev->jobs[vq].job_id);
+
+	return 0;
+}
+
+static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+	struct pstore_dev *pdev = dev;
+
+	return pdev->vqs[vq].pfn;
+}
+
+static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+	return VIRTIO_PSTORE_QUEUE_SIZE;
+}
+
+static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
+{
+	/* FIXME: dynamic */
+	return size;
+}
+
+static struct virtio_ops pstore_dev_virtio_ops = {
+	.get_config = get_config,
+	.get_host_features = get_host_features,
+	.set_guest_features = set_guest_features,
+	.init_vq = init_vq,
+	.notify_vq = notify_vq,
+	.get_pfn_vq = get_pfn_vq,
+	.get_size_vq = get_size_vq,
+	.set_size_vq = set_size_vq,
+};
+
+int virtio_pstore__init(struct kvm *kvm)
+{
+	struct pstore_dev *pdev;
+	int r;
+
+	if (!kvm->cfg.pstore_path)
+		return 0;
+
+	pdev = malloc(sizeof(*pdev));
+	if (pdev == NULL)
+		return -ENOMEM;
+
+	r = virtio_init(kvm, pdev, &pdev->vdev, &pstore_dev_virtio_ops,
+			VIRTIO_DEFAULT_TRANS(kvm), PCI_DEVICE_ID_VIRTIO_PSTORE,
+			VIRTIO_ID_PSTORE, PCI_CLASS_PSTORE);
+	if (r < 0)
+		goto cleanup;
+
+	list_add_tail(&pdev->list, &pdevs);
+
+	if (compat_id == -1)
+		compat_id = virtio_compat_add_message("virtio-pstore", "CONFIG_VIRTIO_PSTORE");
+	return 0;
+cleanup:
+	free(pdev);
+
+	return r;
+}
+virtio_dev_init(virtio_pstore__init);
+
+int virtio_pstore__exit(struct kvm *kvm)
+{
+	struct pstore_dev *pdev, *tmp;
+
+	list_for_each_entry_safe(pdev, tmp, &pdevs, list) {
+		list_del(&pdev->list);
+		pdev->vdev.ops->exit(kvm, &pdev->vdev);
+		free(pdev);
+	}
+
+	return 0;
+}
+virtio_dev_exit(virtio_pstore__exit);
-- 
2.8.0
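
For illustration only, a minimal host-side sketch of how the saved records
can be listed (not part of the patch; the file name pstore-ls.c and the
output format are made up).  It relies only on the "<type>-<id>[.enc.z]"
naming convention implemented by virtio_pstore_hdr_to_filename() above,
and mirrors the opendir()/readdir()/stat() walk in virtio_pstore_do_read():

/* pstore-ls.c: hypothetical example, not part of the patch.
 *
 * List records saved by the virtio-pstore device under the directory
 * given to lkvm via --pstore, e.g. "dmesg-0.enc.z".
 *
 * Build:  gcc -o pstore-ls pstore-ls.c
 * Usage:  ./pstore-ls dir-xx
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(int argc, char *argv[])
{
	const char *dir = argc > 1 ? argv[1] : ".";
	struct dirent *dent;
	DIR *d;

	d = opendir(dir);
	if (d == NULL) {
		perror("opendir");
		return 1;
	}

	while ((dent = readdir(d)) != NULL) {
		const char *name = dent->d_name;
		const char *type = "unknown";
		unsigned long long id = 0;
		size_t len = strlen(name);
		int compressed;
		struct stat st;
		char path[4096];

		/* skip "." and ".." like virtio_pstore_do_read() does */
		if (name[0] == '.')
			continue;

		/* "<type>-<id>[.enc.z]", the suffix marks zlib compression */
		compressed = len >= 6 && !strcmp(name + len - 6, ".enc.z");

		if (!strncmp(name, "dmesg-", 6)) {
			type = "dmesg";
			id = strtoull(name + 6, NULL, 10);
		} else if (!strncmp(name, "unknown-", 8)) {
			id = strtoull(name + 8, NULL, 10);
		}

		snprintf(path, sizeof(path), "%s/%s", dir, name);
		if (stat(path, &st) < 0)
			continue;

		printf("%-8s id=%llu  %s  %lld bytes\n", type, id,
		       compressed ? "zlib-compressed" : "plain",
		       (long long)st.st_size);
	}

	closedir(d);
	return 0;
}

As noted in the changelog, the *.enc.z records are zlib-compressed;
the guest can always read the messages back through the pstore filesystem.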