[patch] remove vcpu_info array v5

Hi,

So here we go: trying to catch up with the PCI passthrough patch revision
number, here's v5 of the struct vcpu_info patch.

In the end I decided to merge the contents of struct vcpu_info directly
into CPU_COMMON, as it seemed silly to create new files just to remove
them again in the next patch.
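
For anyone who doesn't have the QEMU headers paged in: CPU_COMMON is a
macro that each target's CPUState pastes into its own struct, so fields
added to it become ordinary per-vCPU members reachable straight from env.
A rough sketch of the pattern, with only a couple of the moved fields
shown (illustrative only, not the exact hunk below):

    /* Sketch only -- the real definitions live in qemu/cpu-defs.h and the
     * per-target cpu.h files; the field set is abbreviated here. */
    #define CPU_COMMON                                                  \
        /* ... existing common fields ... */                            \
        pthread_t thread;       /* vCPU thread hosting this env */      \
        int stop, stopped;      /* pause request / acknowledgement */   \
        struct qemu_work_item *queued_work_first, *queued_work_last;

    typedef struct CPUIA64State {
        CPU_COMMON;             /* common fields expand in place */
        /* ... target-specific registers ... */
    } CPUIA64State;             /* CPUState aliases the target type */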

This boots for me on ia64 and builds on x86_64, so obviously it is 100%
perfect <tm>!

Comments, bug reports, or an upstream merge are all welcome :-)

Cheers,
Jes

Merge vcpu_info into CPUState.

Move the contents of struct vcpu_info directly into CPU_COMMON. Rename
struct qemu_kvm_work_item to qemu_work_item, as it really isn't
KVM-specific.

This eliminates the ugly static sized array of struct vcpu_info.

Signed-off-by: Jes Sorensen <jes@xxxxxxx>

---
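
A note on what the conversion looks like at the call sites: everything
that used to index the fixed vcpu_info[] array by cpu_index now
dereferences the CPUState directly, and loops over smp_cpus become walks
of the first_cpu list. Roughly (condensed from the hunks below):

    /* before: per-vCPU state in a static array, indexed by cpu_index */
    vcpu_info[env->cpu_index].stopped = 1;
    for (i = 0; i < smp_cpus; ++i)
        pthread_kill(vcpu_info[i].thread, SIG_IPI);

    /* after: the same fields live on CPUState, reached via the CPU list */
    env->stopped = 1;
    for (penv = first_cpu; penv; penv = (CPUState *)penv->next_cpu)
        pthread_kill(penv->thread, SIG_IPI);
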
 libkvm/kvm-common.h     |    8 +-
 libkvm/libkvm.c         |   28 +++----
 libkvm/libkvm.h         |   10 +-
 qemu/cpu-defs.h         |   15 +++-
 qemu/hw/acpi.c          |   18 ++++
 qemu/qemu-common.h      |    8 ++
 qemu/qemu-kvm-ia64.c    |    4 -
 qemu/qemu-kvm-powerpc.c |    5 -
 qemu/qemu-kvm-x86.c     |   11 +--
 qemu/qemu-kvm.c         |  175 ++++++++++++++++++++++--------------------------
 qemu/qemu-kvm.h         |    6 -
 qemu/target-ia64/cpu.h  |    1 +
 12 files changed, 156 insertions(+), 133 deletions(-)
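
Likewise, the renamed qemu_work_item is nothing more than a node in a
per-env singly linked queue: on_vcpu() appends it and signals the owning
thread, and flush_queued_work() drains it on that vCPU's side (condensed
from the qemu-common.h and qemu-kvm.c hunks):

    struct qemu_work_item {
        struct qemu_work_item *next; /* per-vCPU singly linked queue */
        void (*func)(void *data);    /* deferred call, run on the vCPU thread */
        void *data;
        int done;                    /* set by flush_queued_work(), waited on
                                      * by on_vcpu() under qemu_work_cond */
    };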

Index: kvm-userspace.git/libkvm/kvm-common.h
===================================================================
--- kvm-userspace.git.orig/libkvm/kvm-common.h
+++ kvm-userspace.git/libkvm/kvm-common.h
@@ -84,11 +84,11 @@
 void kvm_show_code(kvm_context_t kvm, int vcpu);
 
 int handle_halt(kvm_context_t kvm, int vcpu);
-int handle_shutdown(kvm_context_t kvm, int vcpu);
-void post_kvm_run(kvm_context_t kvm, int vcpu);
-int pre_kvm_run(kvm_context_t kvm, int vcpu);
+int handle_shutdown(kvm_context_t kvm, void *env);
+void post_kvm_run(kvm_context_t kvm, void *env);
+int pre_kvm_run(kvm_context_t kvm, void *env);
 int handle_io_window(kvm_context_t kvm);
-int handle_debug(kvm_context_t kvm, int vcpu);
+int handle_debug(kvm_context_t kvm, void *env);
 int try_push_interrupts(kvm_context_t kvm);
 
 #endif
Index: kvm-userspace.git/libkvm/libkvm.c
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.c
+++ kvm-userspace.git/libkvm/libkvm.c
@@ -738,9 +738,9 @@
 	return 0;
 }
 
-int handle_debug(kvm_context_t kvm, int vcpu)
+int handle_debug(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->debug(kvm->opaque, vcpu);
+	return kvm->callbacks->debug(kvm->opaque, env);
 }
 
 int kvm_get_regs(kvm_context_t kvm, int vcpu, struct kvm_regs *regs)
@@ -822,9 +822,9 @@
 	return kvm->callbacks->halt(kvm->opaque, vcpu);
 }
 
-int handle_shutdown(kvm_context_t kvm, int vcpu)
+int handle_shutdown(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->shutdown(kvm->opaque, vcpu);
+	return kvm->callbacks->shutdown(kvm->opaque, env);
 }
 
 int try_push_interrupts(kvm_context_t kvm)
@@ -837,14 +837,14 @@
 	return kvm->callbacks->try_push_nmi(kvm->opaque);
 }
 
-void post_kvm_run(kvm_context_t kvm, int vcpu)
+void post_kvm_run(kvm_context_t kvm, void *env)
 {
-	kvm->callbacks->post_kvm_run(kvm->opaque, vcpu);
+	kvm->callbacks->post_kvm_run(kvm->opaque, env);
 }
 
-int pre_kvm_run(kvm_context_t kvm, int vcpu)
+int pre_kvm_run(kvm_context_t kvm, void *env)
 {
-	return kvm->callbacks->pre_kvm_run(kvm->opaque, vcpu);
+	return kvm->callbacks->pre_kvm_run(kvm->opaque, env);
 }
 
 int kvm_get_interrupt_flag(kvm_context_t kvm, int vcpu)
@@ -872,7 +872,7 @@
 #endif
 }
 
-int kvm_run(kvm_context_t kvm, int vcpu)
+int kvm_run(kvm_context_t kvm, int vcpu, void *env)
 {
 	int r;
 	int fd = kvm->vcpu_fd[vcpu];
@@ -886,19 +886,19 @@
 	if (!kvm->irqchip_in_kernel)
 		run->request_interrupt_window = try_push_interrupts(kvm);
 #endif
-	r = pre_kvm_run(kvm, vcpu);
+	r = pre_kvm_run(kvm, env);
 	if (r)
 	    return r;
 	r = ioctl(fd, KVM_RUN, 0);
 
 	if (r == -1 && errno != EINTR && errno != EAGAIN) {
 		r = -errno;
-		post_kvm_run(kvm, vcpu);
+		post_kvm_run(kvm, env);
 		fprintf(stderr, "kvm_run: %s\n", strerror(-r));
 		return r;
 	}
 
-	post_kvm_run(kvm, vcpu);
+	post_kvm_run(kvm, env);
 
 #if defined(KVM_CAP_COALESCED_MMIO)
 	if (kvm->coalesced_mmio) {
@@ -948,7 +948,7 @@
 			r = handle_io(kvm, run, vcpu);
 			break;
 		case KVM_EXIT_DEBUG:
-			r = handle_debug(kvm, vcpu);
+			r = handle_debug(kvm, env);
 			break;
 		case KVM_EXIT_MMIO:
 			r = handle_mmio(kvm, run);
@@ -962,7 +962,7 @@
 #endif
 			break;
 		case KVM_EXIT_SHUTDOWN:
-			r = handle_shutdown(kvm, vcpu);
+			r = handle_shutdown(kvm, env);
 			break;
 #if defined(__s390__)
 		case KVM_EXIT_S390_SIEIC:
Index: kvm-userspace.git/libkvm/libkvm.h
===================================================================
--- kvm-userspace.git.orig/libkvm/libkvm.h
+++ kvm-userspace.git/libkvm/libkvm.h
@@ -55,7 +55,7 @@
 	/// generic memory writes to unmapped memory (For MMIO devices)
     int (*mmio_write)(void *opaque, uint64_t addr, uint8_t *data,
 					int len);
-    int (*debug)(void *opaque, int vcpu);
+    int (*debug)(void *opaque, void *env);
 	/*!
 	 * \brief Called when the VCPU issues an 'hlt' instruction.
 	 *
@@ -63,12 +63,12 @@
 	 * on the host CPU.
 	 */
     int (*halt)(void *opaque, int vcpu);
-    int (*shutdown)(void *opaque, int vcpu);
+    int (*shutdown)(void *opaque, void *env);
     int (*io_window)(void *opaque);
     int (*try_push_interrupts)(void *opaque);
     int (*try_push_nmi)(void *opaque);
-    void (*post_kvm_run)(void *opaque, int vcpu);
-    int (*pre_kvm_run)(void *opaque, int vcpu);
+    void (*post_kvm_run)(void *opaque, void *env);
+    int (*pre_kvm_run)(void *opaque, void *env);
     int (*tpr_access)(void *opaque, int vcpu, uint64_t rip, int is_write);
 #if defined(__powerpc__)
     int (*powerpc_dcr_read)(int vcpu, uint32_t dcrn, uint32_t *data);
@@ -181,7 +181,7 @@
  * return except for when an error has occured, or when you have sent it
  * an EINTR signal.
  */
-int kvm_run(kvm_context_t kvm, int vcpu);
+int kvm_run(kvm_context_t kvm, int vcpu, void *env);
 
 /*!
  * \brief Get interrupt flag from on last exit to userspace
Index: kvm-userspace.git/qemu/cpu-defs.h
===================================================================
--- kvm-userspace.git.orig/qemu/cpu-defs.h
+++ kvm-userspace.git/qemu/cpu-defs.h
@@ -27,6 +27,7 @@
 #include "config.h"
 #include <setjmp.h>
 #include <inttypes.h>
+#include <pthread.h>
 #include "osdep.h"
 
 #ifndef TARGET_LONG_BITS
@@ -142,6 +143,9 @@
 } icount_decr_u16;
 #endif
 
+/* forward declaration */
+struct qemu_work_item;
+
 #define CPU_TEMP_BUF_NLONGS 128
 #define CPU_COMMON                                                      \
     struct TranslationBlock *current_tb; /* currently executing TB  */  \
@@ -200,6 +204,15 @@
     /* user data */                                                     \
     void *opaque;                                                       \
                                                                         \
-    const char *cpu_model_str;
+    const char *cpu_model_str;                                          \
+                                                                        \
+    int sipi_needed;                                                    \
+    int init;                                                           \
+    pthread_t thread;                                                   \
+    int signalled;                                                      \
+    int stop;                                                           \
+    int stopped;                                                        \
+    int created;                                                        \
+    struct qemu_work_item *queued_work_first, *queued_work_last;
 
 #endif
Index: kvm-userspace.git/qemu/hw/acpi.c
===================================================================
--- kvm-userspace.git.orig/qemu/hw/acpi.c
+++ kvm-userspace.git/qemu/hw/acpi.c
@@ -722,6 +722,24 @@
 }
 
 #if defined(TARGET_I386) || defined(TARGET_X86_64)
+#ifdef USE_KVM
+static CPUState *qemu_kvm_cpu_env(int index)
+{
+    CPUState *penv;
+
+    penv = first_cpu;
+
+    while (penv) {
+        if (penv->cpu_index == index)
+            return penv;
+        penv = (CPUState *)penv->next_cpu;
+    }
+
+    return NULL;
+}
+#endif
+
+
 void qemu_system_cpu_hot_add(int cpu, int state)
 {
     CPUState *env;
Index: kvm-userspace.git/qemu/qemu-common.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-common.h
+++ kvm-userspace.git/qemu/qemu-common.h
@@ -143,4 +143,12 @@
 void cpu_save(QEMUFile *f, void *opaque);
 int cpu_load(QEMUFile *f, void *opaque, int version_id);
 
+/* work queue */
+struct qemu_work_item {
+    struct qemu_work_item *next;
+    void (*func)(void *data);
+    void *data;
+    int done;
+};
+
 #endif
Index: kvm-userspace.git/qemu/qemu-kvm-ia64.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-ia64.c
+++ kvm-userspace.git/qemu/qemu-kvm-ia64.c
@@ -39,11 +39,11 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
 }
 
Index: kvm-userspace.git/qemu/qemu-kvm-powerpc.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-powerpc.c
+++ kvm-userspace.git/qemu/qemu-kvm-powerpc.c
@@ -142,14 +142,13 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
 	return;
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
     cpu_single_env = env;
 }
 
Index: kvm-userspace.git/qemu/qemu-kvm-x86.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm-x86.c
+++ kvm-userspace.git/qemu/qemu-kvm-x86.c
@@ -618,17 +618,16 @@
     return 1;
 }
 
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu)
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = cpu_single_env;
-
     if (!kvm_irqchip_in_kernel(kvm_context))
-	kvm_set_cr8(kvm_context, vcpu, cpu_get_apic_tpr(env));
+	kvm_set_cr8(kvm_context, env->cpu_index, cpu_get_apic_tpr(env));
 }
 
-void kvm_arch_post_kvm_run(void *opaque, int vcpu)
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
+    int vcpu = env->cpu_index;
+
     cpu_single_env = env;
 
     env->eflags = kvm_get_interrupt_flag(kvm_context, vcpu)
Index: kvm-userspace.git/qemu/qemu-kvm.c
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.c
+++ kvm-userspace.git/qemu/qemu-kvm.c
@@ -28,7 +28,6 @@
 #include <sys/syscall.h>
 #include <sys/mman.h>
 
-#define bool _Bool
 #define false 0
 #define true 1
 
@@ -43,31 +42,12 @@
 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
-__thread struct vcpu_info *vcpu;
+__thread struct CPUState *current_env;
 
 static int qemu_system_ready;
 
 #define SIG_IPI (SIGRTMIN+4)
 
-struct qemu_kvm_work_item {
-    struct qemu_kvm_work_item *next;
-    void (*func)(void *data);
-    void *data;
-    bool done;
-};
-
-struct vcpu_info {
-    CPUState *env;
-    int sipi_needed;
-    int init;
-    pthread_t thread;
-    int signalled;
-    int stop;
-    int stopped;
-    int created;
-    struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
-} vcpu_info[256];
-
 pthread_t io_thread;
 static int io_thread_fd = -1;
 static int io_thread_sigfd = -1;
@@ -91,43 +71,37 @@
     cpu_single_env = env;
 }
 
-CPUState *qemu_kvm_cpu_env(int index)
-{
-    return vcpu_info[index].env;
-}
-
 static void sig_ipi_handler(int n)
 {
 }
 
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
-    struct qemu_kvm_work_item wi;
+    struct qemu_work_item wi;
 
-    if (vi == vcpu) {
+    if (env == current_env) {
         func(data);
         return;
     }
 
     wi.func = func;
     wi.data = data;
-    if (!vi->queued_work_first)
-        vi->queued_work_first = &wi;
+    if (!env->queued_work_first)
+        env->queued_work_first = &wi;
     else
-        vi->queued_work_last->next = &wi;
-    vi->queued_work_last = &wi;
+        env->queued_work_last->next = &wi;
+    env->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
 
-    pthread_kill(vi->thread, SIG_IPI);
+    pthread_kill(env->thread, SIG_IPI);
     while (!wi.done)
         qemu_cond_wait(&qemu_work_cond);
 }
 
 static void inject_interrupt(void *data)
 {
-    cpu_interrupt(vcpu->env, (int)data);
+    cpu_interrupt(current_env, (int)data);
 }
 
 void kvm_inject_interrupt(CPUState *env, int mask)
@@ -140,29 +114,33 @@
     int signal = 0;
 
     if (env) {
-        if (!vcpu)
+        if (current_env && !current_env->created)
             signal = 1;
-        if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
+        /*
+         * Testing for created here is really redundant
+         */
+        if (current_env && current_env->created &&
+            env != current_env && !env->signalled)
             signal = 1;
 
         if (signal) {
-            vcpu_info[env->cpu_index].signalled = 1;
-                if (vcpu_info[env->cpu_index].thread)
-                    pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
+            env->signalled = 1;
+            if (env->thread)
+                pthread_kill(env->thread, SIG_IPI);
         }
     }
 }
 
 void kvm_update_after_sipi(CPUState *env)
 {
-    vcpu_info[env->cpu_index].sipi_needed = 1;
+    env->sipi_needed = 1;
     kvm_update_interrupt_request(env);
 }
 
 void kvm_apic_init(CPUState *env)
 {
     if (env->cpu_index != 0)
-	vcpu_info[env->cpu_index].init = 1;
+	env->init = 1;
     kvm_update_interrupt_request(env);
 }
 
@@ -178,18 +156,19 @@
     return kvm_arch_try_push_nmi(opaque);
 }
 
-static void post_kvm_run(void *opaque, int vcpu)
+static void post_kvm_run(void *opaque, void *data)
 {
+    CPUState *env = (CPUState *)data;
 
     pthread_mutex_lock(&qemu_mutex);
-    kvm_arch_post_kvm_run(opaque, vcpu);
+    kvm_arch_post_kvm_run(opaque, env);
 }
 
-static int pre_kvm_run(void *opaque, int vcpu)
+static int pre_kvm_run(void *opaque, void *data)
 {
-    CPUState *env = qemu_kvm_cpu_env(vcpu);
+    CPUState *env = (CPUState *)data;
 
-    kvm_arch_pre_kvm_run(opaque, vcpu);
+    kvm_arch_pre_kvm_run(opaque, env);
 
     if (env->interrupt_request & CPU_INTERRUPT_EXIT)
 	return 1;
@@ -227,7 +206,7 @@
 {
     int r;
 
-    r = kvm_run(kvm_context, env->cpu_index);
+    r = kvm_run(kvm_context, env->cpu_index, env);
     if (r < 0) {
         printf("kvm_run returned %d\n", r);
         exit(1);
@@ -240,7 +219,7 @@
 
 static int has_work(CPUState *env)
 {
-    if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
+    if (!vm_running || (env && env->stopped))
 	return 0;
     if (!env->halted)
 	return 1;
@@ -249,18 +228,17 @@
 
 static void flush_queued_work(CPUState *env)
 {
-    struct vcpu_info *vi = &vcpu_info[env->cpu_index];
-    struct qemu_kvm_work_item *wi;
+    struct qemu_work_item *wi;
 
-    if (!vi->queued_work_first)
+    if (!env->queued_work_first)
         return;
 
-    while ((wi = vi->queued_work_first)) {
-        vi->queued_work_first = wi->next;
+    while ((wi = env->queued_work_first)) {
+        env->queued_work_first = wi->next;
         wi->func(wi->data);
         wi->done = true;
     }
-    vi->queued_work_last = NULL;
+    env->queued_work_last = NULL;
     pthread_cond_broadcast(&qemu_work_cond);
 }
 
@@ -291,49 +269,55 @@
     cpu_single_env = env;
     flush_queued_work(env);
 
-    if (vcpu_info[env->cpu_index].stop) {
-	vcpu_info[env->cpu_index].stop = 0;
-	vcpu_info[env->cpu_index].stopped = 1;
+    if (env->stop) {
+	env->stop = 0;
+	env->stopped = 1;
 	pthread_cond_signal(&qemu_pause_cond);
     }
 
-    vcpu_info[env->cpu_index].signalled = 0;
+    env->signalled = 0;
 }
 
 static int all_threads_paused(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
+
+    while (penv) {
+        if (penv->stop)
+            return 0;
+        penv = (CPUState *)penv->next_cpu;
+    }
 
-    for (i = 0; i < smp_cpus; ++i)
-	if (vcpu_info[i].stop)
-	    return 0;
     return 1;
 }
 
 static void pause_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 1;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->stop = 1;
+        pthread_kill(penv->thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
+
     while (!all_threads_paused())
 	qemu_cond_wait(&qemu_pause_cond);
 }
 
 static void resume_all_threads(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     assert(!cpu_single_env);
 
-    for (i = 0; i < smp_cpus; ++i) {
-	vcpu_info[i].stop = 0;
-	vcpu_info[i].stopped = 0;
-	pthread_kill(vcpu_info[i].thread, SIG_IPI);
+    while (penv) {
+        penv->stop = 0;
+        penv->stopped = 0;
+        pthread_kill(penv->thread, SIG_IPI);
+        penv = (CPUState *)penv->next_cpu;
     }
 }
 
@@ -348,7 +332,7 @@
 static void update_regs_for_sipi(CPUState *env)
 {
     kvm_arch_update_regs_for_sipi(env);
-    vcpu_info[env->cpu_index].sipi_needed = 0;
+    env->sipi_needed = 0;
 }
 
 static void update_regs_for_init(CPUState *env)
@@ -361,11 +345,11 @@
 
 #ifdef TARGET_I386
     /* restore SIPI vector */
-    if(vcpu_info[env->cpu_index].sipi_needed)
+    if(env->sipi_needed)
         env->segs[R_CS] = cs;
-
-    vcpu_info[env->cpu_index].init = 0;
 #endif
+
+    env->init = 0;
     kvm_arch_load_regs(env);
 }
 
@@ -387,22 +371,22 @@
 
 void qemu_kvm_system_reset(void)
 {
-    int i;
+    CPUState *penv = first_cpu;
 
     pause_all_threads();
 
     qemu_system_reset();
 
-    for (i = 0; i < smp_cpus; ++i)
-	kvm_arch_cpu_reset(vcpu_info[i].env);
+    while (penv) {
+        kvm_arch_cpu_reset(penv);
+        penv = (CPUState *)penv->next_cpu;
+    }
 
     resume_all_threads();
 }
 
 static int kvm_main_loop_cpu(CPUState *env)
 {
-    struct vcpu_info *info = &vcpu_info[env->cpu_index];
-
     setup_kernel_sigmask(env);
 
     pthread_mutex_lock(&qemu_mutex);
@@ -423,12 +407,12 @@
 	if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
 	    env->halted = 0;
     if (!kvm_irqchip_in_kernel(kvm_context)) {
-	    if (info->init)
+	    if (env->init)
 	        update_regs_for_init(env);
-	    if (info->sipi_needed)
+	    if (env->sipi_needed)
 	        update_regs_for_sipi(env);
     }
-	if (!env->halted && !info->init)
+	if (!env->halted && !env->init)
 	    kvm_cpu_exec(env);
 	env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
 	kvm_main_loop_wait(env, 0);
@@ -442,9 +426,8 @@
     CPUState *env = _env;
     sigset_t signals;
 
-    vcpu = &vcpu_info[env->cpu_index];
-    vcpu->env = env;
-    vcpu->env->thread_id = kvm_get_thread_id();
+    current_env = env;
+    env->thread_id = kvm_get_thread_id();
     sigfillset(&signals);
     sigprocmask(SIG_BLOCK, &signals, NULL);
     kvm_create_vcpu(kvm_context, env->cpu_index);
@@ -452,7 +435,7 @@
 
     /* signal VCPU creation */
     pthread_mutex_lock(&qemu_mutex);
-    vcpu->created = 1;
+    current_env->created = 1;
     pthread_cond_signal(&qemu_vcpu_cond);
 
     /* and wait for machine initialization */
@@ -466,9 +449,9 @@
 
 void kvm_init_new_ap(int cpu, CPUState *env)
 {
-    pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
+    pthread_create(&env->thread, NULL, ap_main_loop, env);
 
-    while (vcpu_info[cpu].created == 0)
+    while (env->created == 0)
 	qemu_cond_wait(&qemu_vcpu_cond);
 }
 
@@ -624,10 +607,12 @@
     return 0;
 }
 
-static int kvm_debug(void *opaque, int vcpu)
+static int kvm_debug(void *opaque, void *data)
 {
+    struct CPUState *env = (struct CPUState *)data;
+
     kvm_debug_stop_requested = 1;
-    vcpu_info[vcpu].stopped = 1;
+    env->stopped = 1;
     return 1;
 }
 
@@ -721,10 +706,12 @@
     return kvm_arch_halt(opaque, vcpu);
 }
 
-static int kvm_shutdown(void *opaque, int vcpu)
+static int kvm_shutdown(void *opaque, void *data)
 {
+    struct CPUState *env = (struct CPUState *)data;
+
     /* stop the current vcpu from going back to guest mode */
-    vcpu_info[cpu_single_env->cpu_index].stopped = 1;
+    env->stopped = 1;
 
     qemu_system_reset_request();
     return 1;
Index: kvm-userspace.git/qemu/qemu-kvm.h
===================================================================
--- kvm-userspace.git.orig/qemu/qemu-kvm.h
+++ kvm-userspace.git/qemu/qemu-kvm.h
@@ -63,16 +63,14 @@
 void kvm_arch_load_regs(CPUState *env);
 int kvm_arch_qemu_init_env(CPUState *cenv);
 int kvm_arch_halt(void *opaque, int vcpu);
-void kvm_arch_pre_kvm_run(void *opaque, int vcpu);
-void kvm_arch_post_kvm_run(void *opaque, int vcpu);
+void kvm_arch_pre_kvm_run(void *opaque, CPUState *env);
+void kvm_arch_post_kvm_run(void *opaque, CPUState *env);
 int kvm_arch_has_work(CPUState *env);
 int kvm_arch_try_push_interrupts(void *opaque);
 int kvm_arch_try_push_nmi(void *opaque);
 void kvm_arch_update_regs_for_sipi(CPUState *env);
 void kvm_arch_cpu_reset(CPUState *env);
 
-CPUState *qemu_kvm_cpu_env(int index);
-
 void qemu_kvm_aio_wait_start(void);
 void qemu_kvm_aio_wait(void);
 void qemu_kvm_aio_wait_end(void);
Index: kvm-userspace.git/qemu/target-ia64/cpu.h
===================================================================
--- kvm-userspace.git.orig/qemu/target-ia64/cpu.h
+++ kvm-userspace.git/qemu/target-ia64/cpu.h
@@ -40,6 +40,7 @@
 #include "cpu-defs.h"
 
 #include "softfloat.h"
+
 typedef struct CPUIA64State {
     CPU_COMMON;
     uint32_t hflags;
