Signed-off-by: Sasha Levin <levinsasha928@xxxxxxxxx>
---
 tools/kvm/disk/qcow.c      | 26 ++++++++++++--------------
 tools/kvm/ioport.c         |  7 ++-----
 tools/kvm/pci.c            |  6 ++++--
 tools/kvm/symbol.c         | 18 +++++++++---------
 tools/kvm/term.c           |  1 -
 tools/kvm/util/util.c      |  3 +--
 tools/kvm/virtio/balloon.c |  6 +++---
 tools/kvm/virtio/core.c    |  1 -
 tools/kvm/virtio/net.c     | 14 +++-----------
 tools/kvm/virtio/pci.c     |  9 ++++-----
 tools/kvm/virtio/rng.c     | 27 +++++++++++++--------------
 tools/kvm/x86/bios.c       |  6 +++---
 tools/kvm/x86/cpuid.c      |  7 ++++---
 tools/kvm/x86/interrupt.c  |  3 ++-
 tools/kvm/x86/kvm-cpu.c    | 39 +++++++++++++++++++--------------------
 tools/kvm/x86/kvm.c        | 23 ++++++++++-------------
 16 files changed, 89 insertions(+), 107 deletions(-)

diff --git a/tools/kvm/disk/qcow.c b/tools/kvm/disk/qcow.c
index e139fa5..b12fe53 100644
--- a/tools/kvm/disk/qcow.c
+++ b/tools/kvm/disk/qcow.c
@@ -329,18 +329,16 @@ static ssize_t qcow1_read_cluster(struct qcow *q, u64 offset,
 		csize &= (q->cluster_size - 1);

 		if (pread_in_full(q->fd, q->cluster_data, csize,
-				  coffset) < 0) {
+				  coffset) < 0)
 			goto out_error;
-		}

 		if (qcow_decompress_buffer(q->cluster_cache, q->cluster_size,
-				q->cluster_data, csize) < 0) {
+				q->cluster_data, csize) < 0)
 			goto out_error;
-		}

 		memcpy(dst, q->cluster_cache + clust_offset, length);

 		mutex_unlock(&q->mutex);
-	} else{
+	} else {
 		if (!clust_start)
 			goto zero_cluster;
@@ -435,7 +433,7 @@ static ssize_t qcow2_read_cluster(struct qcow *q, u64 offset,
 		memcpy(dst, q->cluster_cache + clust_offset, length);

 		mutex_unlock(&q->mutex);
-	} else{
+	} else {
 		clust_start &= QCOW2_OFFSET_MASK;
 		if (!clust_start)
 			goto zero_cluster;
@@ -470,11 +468,11 @@ static ssize_t qcow_read_sector_single(struct disk_image *disk, u64 sector,
 	char *buf;
 	u32 nr;

-	buf		= dst;
-	nr_read		= 0;
+	buf = dst;
+	nr_read = 0;

 	while (nr_read < dst_len) {
-		offset		= sector << SECTOR_SHIFT;
+		offset = sector << SECTOR_SHIFT;
 		if (offset >= header->size)
 			return -1;

@@ -488,9 +486,9 @@ static ssize_t qcow_read_sector_single(struct disk_image *disk, u64 sector,
 		if (nr <= 0)
 			return -1;

-		nr_read		+= nr;
-		buf		+= nr;
-		sector		+= (nr >> SECTOR_SHIFT);
+		nr_read += nr;
+		buf += nr;
+		sector += (nr >> SECTOR_SHIFT);
 	}

 	return dst_len;
@@ -508,9 +506,9 @@ static ssize_t qcow_read_sector(struct disk_image *disk, u64 sector,
 			return -1;
 		}

-		sector	+= iov->iov_len >> SECTOR_SHIFT;
+		sector += iov->iov_len >> SECTOR_SHIFT;
+		total += nr;
 		iov++;
-		total	+= nr;
 	}

 	return total;
diff --git a/tools/kvm/ioport.c b/tools/kvm/ioport.c
index 965cfc2..b417942 100644
--- a/tools/kvm/ioport.c
+++ b/tools/kvm/ioport.c
@@ -111,13 +111,10 @@ bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int s
 	ops = entry->ops;

 	while (count--) {
-		if (direction == KVM_EXIT_IO_IN) {
-			if (ops->io_in)
+		if (direction == KVM_EXIT_IO_IN && ops->io_in)
 				ret = ops->io_in(entry, kvm, port, ptr, size);
-		} else {
-			if (ops->io_out)
+		else if (ops->io_out)
 				ret = ops->io_out(entry, kvm, port, ptr, size);
-		}

 		ptr += size;
 	}
diff --git a/tools/kvm/pci.c b/tools/kvm/pci.c
index 59b2618..06eea0f 100644
--- a/tools/kvm/pci.c
+++ b/tools/kvm/pci.c
@@ -160,10 +160,12 @@ void pci__config_rd(struct kvm *kvm, union pci_config_address addr, void *data,
 			void *p = pci_devices[dev_num];

 			memcpy(data, p + offset, size);
-		} else
+		} else {
 			memset(data, 0x00, size);
-	} else
+		}
+	} else {
 		memset(data, 0xff, size);
+	}
 }

 void pci__register(struct pci_device_header *dev, u8 dev_num)
diff --git a/tools/kvm/symbol.c b/tools/kvm/symbol.c
index 56dd346..a2b1e67 100644
--- a/tools/kvm/symbol.c
+++ b/tools/kvm/symbol.c
@@ -7,7 +7,7 @@
 #include <stdio.h>
 #include <bfd.h>

-static bfd	*abfd;
+static bfd *abfd;

 void symbol__init(const char *vmlinux)
 {
@@ -16,7 +16,7 @@ void symbol__init(const char *vmlinux)

 	bfd_init();

-	abfd	= bfd_openr(vmlinux, NULL);
+	abfd = bfd_openr(vmlinux, NULL);
 }

 static asymbol *lookup(asymbol **symbols, int nr_symbols, const char *symbol_name)
@@ -53,17 +53,17 @@ char *symbol__lookup(struct kvm *kvm, unsigned long addr, char *sym, size_t size
 	if (!bfd_check_format(abfd, bfd_object))
 		goto not_found;

-	symtab_size	= bfd_get_symtab_upper_bound(abfd);
+	symtab_size = bfd_get_symtab_upper_bound(abfd);
 	if (!symtab_size)
 		goto not_found;

-	syms		= malloc(symtab_size);
+	syms = malloc(symtab_size);
 	if (!syms)
 		goto not_found;

-	nr_syms		= bfd_canonicalize_symtab(abfd, syms);
+	nr_syms = bfd_canonicalize_symtab(abfd, syms);

-	section		= bfd_get_section_by_name(abfd, ".debug_aranges");
+	section = bfd_get_section_by_name(abfd, ".debug_aranges");
 	if (!section)
 		goto not_found;

@@ -73,13 +73,13 @@ char *symbol__lookup(struct kvm *kvm, unsigned long addr, char *sym, size_t size
 	if (!func)
 		goto not_found;

-	symbol		= lookup(syms, nr_syms, func);
+	symbol = lookup(syms, nr_syms, func);
 	if (!symbol)
 		goto not_found;

-	sym_start	= bfd_asymbol_value(symbol);
+	sym_start = bfd_asymbol_value(symbol);

-	sym_offset	= addr - sym_start;
+	sym_offset = addr - sym_start;

 	snprintf(sym, size, "%s+%llx (%s:%i)", func, (long long) sym_offset, filename, line);

diff --git a/tools/kvm/term.c b/tools/kvm/term.c
index b7d8934..cc0c5a5 100644
--- a/tools/kvm/term.c
+++ b/tools/kvm/term.c
@@ -14,7 +14,6 @@
 #include "kvm/kvm.h"
 #include "kvm/kvm-cpu.h"

-
 #define TERM_FD_IN	0
 #define TERM_FD_OUT	1

diff --git a/tools/kvm/util/util.c b/tools/kvm/util/util.c
index 00f7315..4e14fd9 100644
--- a/tools/kvm/util/util.c
+++ b/tools/kvm/util/util.c
@@ -91,9 +91,8 @@ void *mmap_hugetlbfs(const char *htlbfs_path, u64 size)
 	if (statfs(htlbfs_path, &sfs) < 0)
 		die("Can't stat %s\n", htlbfs_path);

-	if ((unsigned int)sfs.f_type != HUGETLBFS_MAGIC) {
+	if ((unsigned int)sfs.f_type != HUGETLBFS_MAGIC)
 		die("%s is not hugetlbfs!\n", htlbfs_path);
-	}

 	blk_size = (unsigned long)sfs.f_bsize;
 	if (sfs.f_bsize == 0 || blk_size > size) {
diff --git a/tools/kvm/virtio/balloon.c b/tools/kvm/virtio/balloon.c
index 07fd976..556f0ec 100644
--- a/tools/kvm/virtio/balloon.c
+++ b/tools/kvm/virtio/balloon.c
@@ -59,9 +59,9 @@ static bool virtio_bln_do_io_request(struct kvm *kvm, struct bln_dev *bdev, stru
 	u16 out, in, head;
 	u32 *ptrs, i;

-	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
-	ptrs	= iov[0].iov_base;
-	len	= iov[0].iov_len / sizeof(u32);
+	head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
+	ptrs = iov[0].iov_base;
+	len = iov[0].iov_len / sizeof(u32);

 	for (i = 0 ; i < len ; i++) {
 		void *guest_ptr;
diff --git a/tools/kvm/virtio/core.c b/tools/kvm/virtio/core.c
index 5dc767a..c68572c 100644
--- a/tools/kvm/virtio/core.c
+++ b/tools/kvm/virtio/core.c
@@ -67,7 +67,6 @@ u16 virt_queue__get_head_iov(struct virt_queue *vq, struct iovec iov[], u16 *out
 	desc = vq->vring.desc;

 	if (desc[idx].flags & VRING_DESC_F_INDIRECT) {
-
 		max = desc[idx].len / sizeof(struct vring_desc);
 		desc = guest_flat_to_host(kvm, desc[idx].addr);
 		idx = 0;
diff --git a/tools/kvm/virtio/net.c b/tools/kvm/virtio/net.c
index 9b93c5d..bb97a7e 100644
--- a/tools/kvm/virtio/net.c
+++ b/tools/kvm/virtio/net.c
@@ -87,18 +87,14 @@ static void *virtio_net_rx_thread(void *p)
 	vq = &ndev->vqs[VIRTIO_NET_RX_QUEUE];

 	while (1) {
-
 		mutex_lock(&ndev->io_rx_lock);
 		if (!virt_queue__available(vq))
 			pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
 		mutex_unlock(&ndev->io_rx_lock);

 		while (virt_queue__available(vq)) {
-
 			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
-
 			len = ndev->ops->rx(iov, in, ndev);
-
 			virt_queue__set_used_elem(vq, head, len);

 			/* We should interrupt guest right now, otherwise latency is huge. */
@@ -106,7 +102,6 @@ static void *virtio_net_rx_thread(void *p)
 				ndev->vtrans.trans_ops->signal_vq(kvm, &ndev->vtrans, VIRTIO_NET_RX_QUEUE);
 		}
-
 	}

 	pthread_exit(NULL);
 }
@@ -134,11 +129,8 @@
 		mutex_unlock(&ndev->io_tx_lock);

 		while (virt_queue__available(vq)) {
-
 			head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
-
 			len = ndev->ops->tx(iov, out, ndev);
-
 			virt_queue__set_used_elem(vq, head, len);
 		}

@@ -346,9 +338,9 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)

 	compat__remove_message(compat_id);

-	queue		= &ndev->vqs[vq];
-	queue->pfn	= pfn;
-	p		= guest_pfn_to_host(kvm, queue->pfn);
+	queue = &ndev->vqs[vq];
+	queue->pfn = pfn;
+	p = guest_pfn_to_host(kvm, queue->pfn);

 	vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

diff --git a/tools/kvm/virtio/pci.c b/tools/kvm/virtio/pci.c
index 2ffa5de..3051314 100644
--- a/tools/kvm/virtio/pci.c
+++ b/tools/kvm/virtio/pci.c
@@ -150,7 +150,7 @@ static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_trans *vt
 		vpci->config_gsi = gsi;
 		break;

-	case VIRTIO_MSI_QUEUE_VECTOR: {
+	case VIRTIO_MSI_QUEUE_VECTOR:
 		vec = vpci->vq_vector[vpci->queue_selector] = ioport__read16(data);

 		gsi = irq__add_msix_route(kvm, &vpci->msix_table[vec].msg);
@@ -159,7 +159,6 @@ static bool virtio_pci__specific_io_out(struct kvm *kvm, struct virtio_trans *vt
 		vtrans->virtio_ops->notify_vq_gsi(kvm, vpci->dev, vpci->queue_selector, gsi);
 		break;
-	}
 	};

 	return true;
 }
@@ -195,14 +194,14 @@ static bool virtio_pci__io_out(struct ioport *ioport, struct kvm *kvm, u16 port,
 		vtrans->virtio_ops->init_vq(kvm, vpci->dev, vpci->queue_selector, val);
 		break;
 	case VIRTIO_PCI_QUEUE_SEL:
-		vpci->queue_selector	= ioport__read16(data);
+		vpci->queue_selector = ioport__read16(data);
 		break;
 	case VIRTIO_PCI_QUEUE_NOTIFY:
-		val			= ioport__read16(data);
+		val = ioport__read16(data);
 		vtrans->virtio_ops->notify_vq(kvm, vpci->dev, val);
 		break;
 	case VIRTIO_PCI_STATUS:
-		vpci->status		= ioport__read8(data);
+		vpci->status = ioport__read8(data);
 		break;
 	default:
 		ret = virtio_pci__specific_io_out(kvm, vtrans, port, data, size, offset);
diff --git a/tools/kvm/virtio/rng.c b/tools/kvm/virtio/rng.c
index c333e6b..c9430cb 100644
--- a/tools/kvm/virtio/rng.c
+++ b/tools/kvm/virtio/rng.c
@@ -70,8 +70,8 @@ static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev, stru
 	unsigned int len = 0;
 	u16 out, in, head;

-	head	= virt_queue__get_iov(queue, iov, &out, &in, kvm);
-	len	= readv(rdev->fd, iov, in);
+	head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
+	len = readv(rdev->fd, iov, in);

 	virt_queue__set_used_elem(queue, head, len);

@@ -80,9 +80,9 @@ static bool virtio_rng_do_io_request(struct kvm *kvm, struct rng_dev *rdev, stru

 static void virtio_rng_do_io(struct kvm *kvm, void *param)
 {
-	struct rng_dev_job	*job	= param;
-	struct virt_queue	*vq	= job->vq;
-	struct rng_dev		*rdev	= job->rdev;
+	struct rng_dev_job *job = param;
+	struct virt_queue *vq = job->vq;
+	struct rng_dev *rdev = job->rdev;

 	while (virt_queue__available(vq))
 		virtio_rng_do_io_request(kvm, rdev, vq);
@@ -99,17 +99,17 @@ static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)

 	compat__remove_message(compat_id);

-	queue		= &rdev->vqs[vq];
-	queue->pfn	= pfn;
-	p		= guest_pfn_to_host(kvm, queue->pfn);
+	queue = &rdev->vqs[vq];
+	queue->pfn = pfn;
+	p = guest_pfn_to_host(kvm, queue->pfn);

 	job = &rdev->jobs[vq];

 	vring_init(&queue->vring, VIRTIO_RNG_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);

-	*job		= (struct rng_dev_job) {
-		.vq	= queue,
-		.rdev	= rdev,
+	*job = (struct rng_dev_job) {
+		.vq = queue,
+		.rdev = rdev,
 	};

 	thread_pool__init_job(&job->job_id, kvm, virtio_rng_do_io, job);
@@ -179,10 +179,9 @@ void virtio_rng__init(struct kvm *kvm)

 void virtio_rng__delete_all(struct kvm *kvm)
 {
-	while (!list_empty(&rdevs)) {
-		struct rng_dev *rdev;
+	struct rng_dev *rdev, *tmp;

-		rdev = list_first_entry(&rdevs, struct rng_dev, list);
+	list_for_each_entry_safe(rdev, tmp, &rdevs, list) {
 		list_del(&rdev->list);
 		free(rdev);
 	}
diff --git a/tools/kvm/x86/bios.c b/tools/kvm/x86/bios.c
index 47b1f54..ded0717 100644
--- a/tools/kvm/x86/bios.c
+++ b/tools/kvm/x86/bios.c
@@ -38,7 +38,7 @@ static void setup_irq_handler(struct kvm *kvm, struct irq_handler *handler)
 	struct real_intr_desc intr_desc;
 	void *p;

-	p	= guest_flat_to_host(kvm, handler->address);
+	p = guest_flat_to_host(kvm, handler->address);
 	memcpy(p, handler->handler, handler->size);

 	intr_desc = (struct real_intr_desc) {
@@ -46,7 +46,7 @@ static void setup_irq_handler(struct kvm *kvm, struct irq_handler *handler)
 		.offset		= handler->address - MB_BIOS_BEGIN,
 	};

-	DIE_IF((handler->address - MB_BIOS_BEGIN) > (unsigned long)0xffff);
+	DIE_IF((handler->address - MB_BIOS_BEGIN) > 0xffffUL);

 	interrupt_table__set(&kvm->interrupt_table, &intr_desc, handler->irq);
 }
@@ -100,7 +100,7 @@ static void e820_setup(struct kvm *kvm)

 	BUILD_BUG_ON(i > E820_X_MAX);

-	e820->nr_map	= i;
+	e820->nr_map = i;
 }

 static void setup_vga_rom(struct kvm *kvm)
diff --git a/tools/kvm/x86/cpuid.c b/tools/kvm/x86/cpuid.c
index 64d31a3..644f37f 100644
--- a/tools/kvm/x86/cpuid.c
+++ b/tools/kvm/x86/cpuid.c
@@ -24,10 +24,10 @@ static void filter_cpuid(struct kvm_cpuid2 *kvm_cpuid)
 		switch (entry->function) {
 		case 6:
 			/* Clear presence of IA32_ENERGY_PERF_BIAS */
-			entry->ecx	= entry->ecx & ~(1 << 3);
+			entry->ecx = entry->ecx & ~(1 << 3);
 			break;
 		case CPUID_FUNC_PERFMON:
-			entry->eax	= 0x00; /* disable it */
+			entry->eax = 0x00; /* disable it */
 			break;
 		default:
 			/* Keep the CPUID function as -is */
@@ -40,7 +40,8 @@ void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu)
 {
 	struct kvm_cpuid2 *kvm_cpuid;

-	kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) + MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
+	kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) +
+				MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));

 	kvm_cpuid->nent = MAX_KVM_CPUID_ENTRIES;
 	if (ioctl(vcpu->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
diff --git a/tools/kvm/x86/interrupt.c b/tools/kvm/x86/interrupt.c
index e4636c4..7d47869 100644
--- a/tools/kvm/x86/interrupt.c
+++ b/tools/kvm/x86/interrupt.c
@@ -20,7 +20,8 @@ void interrupt_table__setup(struct interrupt_table *itable, struct real_intr_des
 		itable->entries[i] = *entry;
 }

-void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num)
+void interrupt_table__set(struct interrupt_table *itable,
+				struct real_intr_desc *entry, unsigned int num)
 {
 	if (num < REAL_INTR_VECTORS)
 		itable->entries[num] = *entry;
diff --git a/tools/kvm/x86/kvm-cpu.c b/tools/kvm/x86/kvm-cpu.c
index 30f1ad6..051699f 100644
--- a/tools/kvm/x86/kvm-cpu.c
+++ b/tools/kvm/x86/kvm-cpu.c
@@ -53,18 +53,18 @@ static inline u32 selector_to_base(u16 selector)
 	/*
 	 * KVM on Intel requires 'base' to be 'selector * 16' in real mode.
 	 */
-	return (u32)selector * 16;
+	return (u32)selector << 4;
 }

 static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
 {
 	struct kvm_cpu *vcpu;

-	vcpu = calloc(1, sizeof *vcpu);
+	vcpu = calloc(1, sizeof(*vcpu));
 	if (!vcpu)
 		return NULL;

-	vcpu->kvm	= kvm;
+	vcpu->kvm = kvm;

 	return vcpu;
 }
@@ -96,11 +96,11 @@ struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
 	int mmap_size;
 	int coalesced_offset;

-	vcpu		= kvm_cpu__new(kvm);
+	vcpu = kvm_cpu__new(kvm);
 	if (!vcpu)
 		return NULL;

-	vcpu->cpu_id	= cpu_id;
+	vcpu->cpu_id = cpu_id;

 	vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
 	if (vcpu->vcpu_fd < 0)
@@ -159,7 +159,7 @@ static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
 	vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_MISC_ENABLE,
 						MSR_IA32_MISC_ENABLE_FAST_STRING);

-	vcpu->msrs->nmsrs	= ndx;
+	vcpu->msrs->nmsrs = ndx;

 	if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
 		die_perror("KVM_SET_MSRS failed");
@@ -168,8 +168,8 @@ static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
 {
 	vcpu->fpu = (struct kvm_fpu) {
-		.fcw	= 0x37f,
-		.mxcsr	= 0x1f80,
+		.fcw = 0x37f,
+		.mxcsr = 0x1f80,
 	};

 	if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
 		die_perror("KVM_SET_FPU failed");
@@ -180,15 +180,15 @@ static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
 {
 	vcpu->regs = (struct kvm_regs) {
 		/* We start the guest in 16-bit real mode */
-		.rflags	= 0x0000000000000002ULL,
+		.rflags = 0x0000000000000002ULL,

-		.rip	= vcpu->kvm->boot_ip,
-		.rsp	= vcpu->kvm->boot_sp,
-		.rbp	= vcpu->kvm->boot_sp,
+		.rip = vcpu->kvm->boot_ip,
+		.rsp = vcpu->kvm->boot_sp,
+		.rbp = vcpu->kvm->boot_sp,
 	};

 	if (vcpu->regs.rip > USHRT_MAX)
-		die("ip 0x%llx is too high for real mode", (u64) vcpu->regs.rip);
+		die("ip 0x%llx is too high for real mode", (u64)vcpu->regs.rip);

 	if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
 		die_perror("KVM_SET_REGS failed");
@@ -196,7 +196,6 @@ static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)

 static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
 {
-
 	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
 		die_perror("KVM_GET_SREGS failed");

@@ -318,12 +317,12 @@ void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
 	dprintf(debug_fd, "\n");
 }

-#define MAX_SYM_LEN	128
+#define MAX_SYM_LEN 128

 void kvm_cpu__show_code(struct kvm_cpu *vcpu)
 {
 	unsigned int code_bytes = 64;
-	unsigned int code_prologue = code_bytes * 43 / 64;
+	unsigned int code_prologue = 43;
 	unsigned int code_len = code_bytes;
 	char sym[MAX_SYM_LEN];
 	unsigned char c;
@@ -377,19 +376,19 @@ void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
 	if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
 		die("KVM_GET_SREGS failed");

-	pte4	= guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
+	pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
 	if (!host_ptr_in_ram(vcpu->kvm, pte4))
 		return;

-	pte3	= guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
+	pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
 	if (!host_ptr_in_ram(vcpu->kvm, pte3))
 		return;

-	pte2	= guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
+	pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
 	if (!host_ptr_in_ram(vcpu->kvm, pte2))
 		return;

-	pte1	= guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
+	pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
 	if (!host_ptr_in_ram(vcpu->kvm, pte1))
 		return;

diff --git a/tools/kvm/x86/kvm.c b/tools/kvm/x86/kvm.c
index bc52ef3..d2fbbe2 100644
--- a/tools/kvm/x86/kvm.c
+++ b/tools/kvm/x86/kvm.c
@@ -21,7 +21,6 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <fcntl.h>
-#include <asm/unistd.h>

 struct kvm_ext kvm_req_ext[] = {
 	{ DEFINE_KVM_EXT(KVM_CAP_COALESCED_MMIO) },
@@ -124,24 +123,23 @@ void kvm__arch_set_cmdline(char *cmdline, bool video)
 {
 	strcpy(cmdline, "noapic noacpi pci=conf1 reboot=k panic=1 i8042.direct=1 "
 				"i8042.dumbkbd=1 i8042.nopnp=1");
-	if (video) {
+	if (video)
 		strcat(cmdline, " video=vesafb console=tty0");
-	} else
+	else
 		strcat(cmdline, " console=ttyS0 earlyprintk=serial i8042.noaux=1");
 }

 /* This function wraps the decision between hugetlbfs map (if requested) or normal mmap */
 static void *mmap_anon_or_hugetlbfs(const char *hugetlbfs_path, u64 size)
 {
-	if (hugetlbfs_path) {
+	if (hugetlbfs_path)
 		/*
 		 * We don't /need/ to map guest RAM from hugetlbfs, but we do so
 		 * if the user specifies a hugetlbfs path.
 		 */
 		return mmap_hugetlbfs(hugetlbfs_path, size);
-	} else {
+	else
 		return mmap(NULL, size, PROT_RW, MAP_ANON_NORESERVE, -1, 0);
-	}
 }

 /* Architecture-specific KVM init */
@@ -158,19 +156,18 @@ void kvm__arch_init(struct kvm *kvm, const char *kvm_dev, const char *hugetlbfs_
 	if (ret < 0)
 		die_perror("KVM_CREATE_PIT2 ioctl");

-	kvm->ram_size	= ram_size;
+	kvm->ram_size = ram_size;

 	if (kvm->ram_size < KVM_32BIT_GAP_START) {
 		kvm->ram_start = mmap_anon_or_hugetlbfs(hugetlbfs_path, ram_size);
 	} else {
 		kvm->ram_start = mmap_anon_or_hugetlbfs(hugetlbfs_path, ram_size + KVM_32BIT_GAP_SIZE);
-		if (kvm->ram_start != MAP_FAILED) {
+		if (kvm->ram_start != MAP_FAILED)
 			/*
 			 * We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
 			 * if we accidently write to it, we will know.
 			 */
 			mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
-		}
 	}
 	if (kvm->ram_start == MAP_FAILED)
 		die("out of memory");
@@ -238,7 +235,7 @@ int load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *
 	return true;
 }

-static const char *BZIMAGE_MAGIC	= "HdrS";
+static const char *BZIMAGE_MAGIC = "HdrS";

 bool load_bzimage(struct kvm *kvm, int fd_kernel, int fd_initrd,
 		  const char *kernel_cmdline, u16 vidmode)
@@ -334,13 +331,13 @@ bool load_bzimage(struct kvm *kvm, int fd_kernel,
 		kern_boot->hdr.ramdisk_size	= initrd_stat.st_size;
 	}

-	kvm->boot_selector	= BOOT_LOADER_SELECTOR;
+	kvm->boot_selector = BOOT_LOADER_SELECTOR;
 	/*
 	 * The real-mode setup code starts at offset 0x200 of a bzImage. See
 	 * Documentation/x86/boot.txt for details.
 	 */
-	kvm->boot_ip		= BOOT_LOADER_IP + 0x200;
-	kvm->boot_sp		= BOOT_LOADER_SP;
+	kvm->boot_ip = BOOT_LOADER_IP + 0x200;
+	kvm->boot_sp = BOOT_LOADER_SP;

 	return true;
 }
--
1.7.8

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html