[PATCH v2 3/5] reuse kvm_ioctl

Start using the upstream kvm_ioctl() implementation from kvm-all.c.

To keep this type safe, delete fd from kvm_context entirely, so the
compiler helps catch any call sites I might have missed converting.

We also handled ioctl error codes slightly differently from qemu
upstream: the raw ioctl() call sites tested for a return value of -1
and then consulted errno, whereas kvm_ioctl(), like kvm_vm_ioctl(),
returns -errno directly. Most of the call sites already cope with that
convention, so this patch has the added benefit of simplifying the
remaining ones.

Signed-off-by: Glauber Costa <glommer@xxxxxxxxxx>
---
 kvm-all.c      |    2 +-
 qemu-kvm-x86.c |   37 +++++++++++++++++--------------------
 qemu-kvm.c     |   41 ++++++++++++++++++++---------------------
 qemu-kvm.h     |    3 +--
 4 files changed, 39 insertions(+), 44 deletions(-)
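
As an illustration of the ioctl-vs-kvm_ioctl error convention discussed
above (not part of the patch itself), here is a rough before/after
sketch of a typical capability check; the helper names are made up for
the example, while kvm_state, kvm_ioctl() and KVM_CAP_SET_TSS_ADDR are
the real identifiers used in the hunks below:

static int check_tss_cap_old(kvm_context_t kvm)
{
	/* old convention: raw ioctl() returns -1 and leaves the code in errno
	 * (kvm->fd goes away with this patch) */
	int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);

	return r == -1 ? -errno : r;
}

static int check_tss_cap_new(void)
{
	/* new convention: kvm_ioctl() already folds -errno into its return value */
	return kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
}

Either way the caller can now simply test for r < 0 (or r > 0 for a
supported capability), which is what the converted call sites below do.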

diff --git a/kvm-all.c b/kvm-all.c
index 9373d99..0ec6475 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -793,6 +793,7 @@ void kvm_set_phys_mem(target_phys_addr_t start_addr,
     }
 }
 
+#endif
 int kvm_ioctl(KVMState *s, int type, ...)
 {
     int ret;
@@ -809,7 +810,6 @@ int kvm_ioctl(KVMState *s, int type, ...)
 
     return ret;
 }
-#endif
 
 int kvm_vm_ioctl(KVMState *s, int type, ...)
 {
diff --git a/qemu-kvm-x86.c b/qemu-kvm-x86.c
index eec64db..c9f9ac3 100644
--- a/qemu-kvm-x86.c
+++ b/qemu-kvm-x86.c
@@ -38,7 +38,7 @@ int kvm_set_tss_addr(kvm_context_t kvm, unsigned long addr)
 #ifdef KVM_CAP_SET_TSS_ADDR
 	int r;
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
 	if (r > 0) {
 		r = kvm_vm_ioctl(kvm_state, KVM_SET_TSS_ADDR, addr);
 		if (r < 0) {
@@ -56,7 +56,7 @@ static int kvm_init_tss(kvm_context_t kvm)
 #ifdef KVM_CAP_SET_TSS_ADDR
 	int r;
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
 	if (r > 0) {
 		/*
 		 * this address is 3 pages before the bios, and the bios should present
@@ -80,7 +80,7 @@ static int kvm_create_pit(kvm_context_t kvm)
 
 	kvm->pit_in_kernel = 0;
 	if (!kvm->no_pit_creation) {
-		r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
+		r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_PIT);
 		if (r > 0) {
 			r = kvm_vm_ioctl(kvm_state, KVM_CREATE_PIT);
 			if (r >= 0)
@@ -356,11 +356,11 @@ void kvm_show_code(kvm_vcpu_context_t vcpu)
 struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
 {
 	struct kvm_msr_list sizer, *msrs;
-	int r, e;
+	int r;
 
 	sizer.nmsrs = 0;
-	r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, &sizer);
-	if (r == -1 && errno != E2BIG)
+	r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, &sizer);
+	if (r < 0 && r != -E2BIG)
 		return NULL;
 	/* Old kernel modules had a bug and could write beyond the provided
 	   memory. Allocate at least a safe amount of 1K. */
@@ -368,11 +368,10 @@ struct kvm_msr_list *kvm_get_msr_list(kvm_context_t kvm)
 				       sizer.nmsrs * sizeof(*msrs->indices)));
 
 	msrs->nmsrs = sizer.nmsrs;
-	r = ioctl(kvm->fd, KVM_GET_MSR_INDEX_LIST, msrs);
-	if (r == -1) {
-		e = errno;
+	r = kvm_ioctl(kvm_state, KVM_GET_MSR_INDEX_LIST, msrs);
+	if (r < 0) {
 		free(msrs);
-		errno = e;
+		errno = -r;
 		return NULL;
 	}
 	return msrs;
@@ -413,10 +412,10 @@ int kvm_get_mce_cap_supported(kvm_context_t kvm, uint64_t *mce_cap,
 #ifdef KVM_CAP_MCE
     int r;
 
-    r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
+    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MCE);
     if (r > 0) {
         *max_banks = r;
-        return ioctl(kvm->fd, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
+        return kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
     }
 #endif
     return -ENOSYS;
@@ -554,7 +553,7 @@ int kvm_set_shadow_pages(kvm_context_t kvm, unsigned int nrshadow_pages)
 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
 	int r;
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
 		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
 	if (r > 0) {
 		r = kvm_vm_ioctl(kvm_state, KVM_SET_NR_MMU_PAGES, nrshadow_pages);
@@ -573,7 +572,7 @@ int kvm_get_shadow_pages(kvm_context_t kvm, unsigned int *nrshadow_pages)
 #ifdef KVM_CAP_MMU_SHADOW_CACHE_CONTROL
 	int r;
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
 		  KVM_CAP_MMU_SHADOW_CACHE_CONTROL);
 	if (r > 0) {
 		*nrshadow_pages = kvm_vm_ioctl(kvm_state, KVM_GET_NR_MMU_PAGES);
@@ -592,8 +591,8 @@ static int tpr_access_reporting(kvm_vcpu_context_t vcpu, int enabled)
 		.enabled = enabled,
 	};
 
-	r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
-	if (r == -1 || r == 0)
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_VAPIC);
+	if (r <= 0)
 		return -ENOSYS;
 	r = ioctl(vcpu->fd, KVM_TPR_ACCESS_REPORTING, &tac);
 	if (r == -1) {
@@ -626,10 +625,8 @@ static struct kvm_cpuid2 *try_get_cpuid(kvm_context_t kvm, int max)
 	size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
 	cpuid = qemu_malloc(size);
 	cpuid->nent = max;
-	r = ioctl(kvm->fd, KVM_GET_SUPPORTED_CPUID, cpuid);
-	if (r == -1)
-		r = -errno;
-	else if (r == 0 && cpuid->nent >= max)
+	r = kvm_ioctl(kvm_state, KVM_GET_SUPPORTED_CPUID, cpuid);
+	if (r == 0 && cpuid->nent >= max)
 		r = -E2BIG;
 	if (r < 0) {
 		if (r == -E2BIG) {
diff --git a/qemu-kvm.c b/qemu-kvm.c
index e90681f..98cfee0 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -227,7 +227,7 @@ static int get_free_slot(kvm_context_t kvm)
 	int tss_ext;
 
 #if defined(KVM_CAP_SET_TSS_ADDR) && !defined(__s390__)
-	tss_ext = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
+	tss_ext = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_TSS_ADDR);
 #else
 	tss_ext = 0;
 #endif
@@ -451,7 +451,7 @@ int kvm_init(int smp_cpus)
 	kvm_state = qemu_mallocz(sizeof(*kvm_state));
     kvm_context = &kvm_state->kvm_context;
 
-	kvm_context->fd = fd;
+	kvm_state->fd = fd;
 	kvm_state->vmfd = -1;
 	kvm_context->opaque = cpu_single_env;
 	kvm_context->dirty_pages_log_all = 0;
@@ -492,7 +492,7 @@ static void kvm_finalize(KVMState *s)
 	if (kvm->vm_fd != -1)
 		close(kvm->vm_fd);
 	*/
-	close(s->kvm_context.fd);
+	close(s->fd);
 	free(s);
 }
 
@@ -526,8 +526,8 @@ kvm_vcpu_context_t kvm_create_vcpu(CPUState *env, int id)
     env->kvm_fd = r;
     env->kvm_state = kvm_state;
 
-	mmap_size = ioctl(kvm->fd, KVM_GET_VCPU_MMAP_SIZE, 0);
-	if (mmap_size == -1) {
+	mmap_size = kvm_ioctl(kvm_state, KVM_GET_VCPU_MMAP_SIZE, 0);
+	if (mmap_size < 0) {
 		fprintf(stderr, "get vcpu mmap size: %m\n");
 		goto err_fd;
 	}
@@ -548,7 +548,7 @@ err:
 static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
 {
 #ifdef KVM_CAP_SET_BOOT_CPU_ID
-    int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
+    int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID);
     if (r > 0)
         return kvm_vm_ioctl(kvm_state, KVM_SET_BOOT_CPU_ID, id);
     return -ENOSYS;
@@ -559,15 +559,14 @@ static int kvm_set_boot_vcpu_id(kvm_context_t kvm, uint32_t id)
 
 int kvm_create_vm(kvm_context_t kvm)
 {
-	int fd = kvm->fd;
-
+    int fd;
 #ifdef KVM_CAP_IRQ_ROUTING
 	kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
 	kvm->nr_allocated_irq_routes = 0;
 #endif
 
-	fd = ioctl(fd, KVM_CREATE_VM, 0);
-	if (fd == -1) {
+	fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
+	if (fd < 0) {
 		fprintf(stderr, "kvm_create_vm: %m\n");
 		return -1;
 	}
@@ -580,7 +579,7 @@ static int kvm_create_default_phys_mem(kvm_context_t kvm,
 				       void **vm_mem)
 {
 #ifdef KVM_CAP_USER_MEMORY
-	int r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
+	int r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
 	if (r > 0)
 		return 0;
 	fprintf(stderr, "Hypervisor too old: KVM_CAP_USER_MEMORY extension not supported\n");
@@ -594,7 +593,7 @@ int kvm_check_extension(kvm_context_t kvm, int ext)
 {
 	int ret;
 
-	ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, ext);
+	ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, ext);
 	if (ret > 0)
 		return ret;
 	return 0;
@@ -607,13 +606,13 @@ void kvm_create_irqchip(kvm_context_t kvm)
 	kvm->irqchip_in_kernel = 0;
 #ifdef KVM_CAP_IRQCHIP
 	if (!kvm->no_irqchip_creation) {
-		r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
+		r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
 		if (r > 0) {	/* kernel irqchip supported */
 			r = kvm_vm_ioctl(kvm_state, KVM_CREATE_IRQCHIP);
 			if (r >= 0) {
 				kvm->irqchip_inject_ioctl = KVM_IRQ_LINE;
 #if defined(KVM_CAP_IRQ_INJECT_STATUS) && defined(KVM_IRQ_LINE_STATUS)
-				r = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+				r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
 			  KVM_CAP_IRQ_INJECT_STATUS);
 				if (r > 0)
 					kvm->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
@@ -942,7 +941,7 @@ int kvm_get_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
-    r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
         return ioctl(vcpu->fd, KVM_GET_MP_STATE, mp_state);
     return -ENOSYS;
@@ -952,7 +951,7 @@ int kvm_set_mpstate(kvm_vcpu_context_t vcpu, struct kvm_mp_state *mp_state)
 {
     int r;
 
-    r = ioctl(vcpu->kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
+    r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE);
     if (r > 0)
         return ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
     return -ENOSYS;
@@ -1188,7 +1187,7 @@ int kvm_has_sync_mmu(void)
 {
         int r = 0;
 #ifdef KVM_CAP_SYNC_MMU
-        r = ioctl(kvm_context->fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
+        r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
 #endif
         return r;
 }
@@ -1207,7 +1206,7 @@ int kvm_init_coalesced_mmio(kvm_context_t kvm)
 	int r = 0;
 	kvm->coalesced_mmio = 0;
 #ifdef KVM_CAP_COALESCED_MMIO
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 	if (r > 0) {
 		kvm->coalesced_mmio = r;
 		return 0;
@@ -1282,7 +1281,7 @@ int kvm_assign_irq(kvm_context_t kvm,
 {
 	int ret;
 
-	ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
+	ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_ASSIGN_DEV_IRQ);
 	if (ret > 0) {
 		return kvm_vm_ioctl(kvm_state, KVM_ASSIGN_DEV_IRQ, assigned_irq);
 	}
@@ -1317,7 +1316,7 @@ int kvm_destroy_memory_region_works(kvm_context_t kvm)
 	int ret = 0;
 
 #ifdef KVM_CAP_DESTROY_MEMORY_REGION_WORKS
-	ret = ioctl(kvm->fd, KVM_CHECK_EXTENSION,
+	ret = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION,
 		    KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
 	if (ret <= 0)
 		ret = 0;
@@ -1333,7 +1332,7 @@ int kvm_reinject_control(kvm_context_t kvm, int pit_reinject)
 
 	control.pit_reinject = pit_reinject;
 
-	r = ioctl(kvm->fd, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
+	r = kvm_ioctl(kvm_state, KVM_CHECK_EXTENSION, KVM_CAP_REINJECT_CONTROL);
 	if (r > 0) {
 		return  kvm_vm_ioctl(kvm_state, KVM_REINJECT_CONTROL, &control);
 	}
diff --git a/qemu-kvm.h b/qemu-kvm.h
index b52a249..8c9b72f 100644
--- a/qemu-kvm.h
+++ b/qemu-kvm.h
@@ -52,8 +52,6 @@ extern int kvm_abi;
  */
 
 struct kvm_context {
-	/// Filedescriptor to /dev/kvm
-	int fd;
 	void *opaque;
 	/// is dirty pages logging enabled for all regions or not
 	int dirty_pages_log_all;
@@ -1198,6 +1196,7 @@ typedef struct KVMState
 
 extern KVMState *kvm_state;
 
+int kvm_ioctl(KVMState *s, int type, ...);
 int kvm_vm_ioctl(KVMState *s, int type, ...);
 
 #endif
-- 
1.6.2.2
