Scripted conversion: sed -i "s/CPUState/CPUX86State/g" target-i386/*.[hc] sed -i "s/#define CPUX86State/#define CPUState/" target-i386/cpu.h Signed-off-by: Andreas Färber <afaerber@xxxxxxx> Acked-by: Anthony Liguori <aliguori@xxxxxxxxxx> --- target-i386/cpu.h | 34 +++++----- target-i386/helper.c | 38 +++++----- target-i386/kvm.c | 76 ++++++++++---------- target-i386/machine.c | 172 +++++++++++++++++++++++----------------------- target-i386/op_helper.c | 34 +++++----- target-i386/translate.c | 82 +++++++++++----------- 6 files changed, 218 insertions(+), 218 deletions(-) diff --git a/target-i386/cpu.h b/target-i386/cpu.h index 36e3d29..6e26d21 100644 --- a/target-i386/cpu.h +++ b/target-i386/cpu.h @@ -788,7 +788,7 @@ int cpu_x86_exec(CPUX86State *s); void cpu_x86_close(CPUX86State *s); void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg); void x86_cpudef_setup(void); -int cpu_x86_support_mca_broadcast(CPUState *env); +int cpu_x86_support_mca_broadcast(CPUX86State *env); int cpu_get_pic_interrupt(CPUX86State *s); /* MSDOS compatibility mode FPU exception support */ @@ -970,7 +970,7 @@ uint64_t cpu_get_tsc(CPUX86State *env); #define MMU_MODE0_SUFFIX _kernel #define MMU_MODE1_SUFFIX _user #define MMU_USER_IDX 1 -static inline int cpu_mmu_index (CPUState *env) +static inline int cpu_mmu_index (CPUX86State *env) { return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0; } @@ -1009,7 +1009,7 @@ static inline int cpu_mmu_index (CPUState *env) void optimize_flags_init(void); #if defined(CONFIG_USER_ONLY) -static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) +static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp) { if (newsp) env->regs[R_ESP] = newsp; @@ -1024,7 +1024,7 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp) #include "hw/apic.h" #endif -static inline bool cpu_has_work(CPUState *env) +static inline bool cpu_has_work(CPUX86State *env) { return ((env->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || @@ -1036,12 +1036,12 @@ static inline bool cpu_has_work(CPUState *env) #include "exec-all.h" -static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) +static inline void cpu_pc_from_tb(CPUX86State *env, TranslationBlock *tb) { env->eip = tb->pc - tb->cs_base; } -static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, +static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, target_ulong *cs_base, int *flags) { *cs_base = env->segs[R_CS].base; @@ -1050,29 +1050,29 @@ static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK)); } -void do_cpu_init(CPUState *env); -void do_cpu_sipi(CPUState *env); +void do_cpu_init(CPUX86State *env); +void do_cpu_sipi(CPUX86State *env); #define MCE_INJECT_BROADCAST 1 #define MCE_INJECT_UNCOND_AO 2 -void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank, +void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags); /* op_helper.c */ -void do_interrupt(CPUState *env); -void do_interrupt_x86_hardirq(CPUState *env, int intno, int is_hw); -void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv); -void QEMU_NORETURN raise_exception_err_env(CPUState *nenv, int exception_index, +void do_interrupt(CPUX86State *env); +void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); +void QEMU_NORETURN raise_exception_env(int exception_index, 
CPUX86State *nenv); +void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index, int error_code); -void do_smm_enter(CPUState *env1); +void do_smm_enter(CPUX86State *env1); -void svm_check_intercept(CPUState *env1, uint32_t type); +void svm_check_intercept(CPUX86State *env1, uint32_t type); -uint32_t cpu_cc_compute_all(CPUState *env1, int op); +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); -void cpu_report_tpr_access(CPUState *env, TPRAccess access); +void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); #endif /* CPU_I386_H */ diff --git a/target-i386/helper.c b/target-i386/helper.c index 140c696..83122bf 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -27,7 +27,7 @@ //#define DEBUG_MMU /* NOTE: must be called outside the CPU execute loop */ -void cpu_state_reset(CPUState *env) +void cpu_state_reset(CPUX86State *env) { int i; @@ -106,7 +106,7 @@ void cpu_x86_close(CPUX86State *env) g_free(env); } -static void cpu_x86_version(CPUState *env, int *family, int *model) +static void cpu_x86_version(CPUX86State *env, int *family, int *model) { int cpuver = env->cpuid_version; @@ -119,7 +119,7 @@ static void cpu_x86_version(CPUState *env, int *family, int *model) } /* Broadcast MCA signal for processor version 06H_EH and above */ -int cpu_x86_support_mca_broadcast(CPUState *env) +int cpu_x86_support_mca_broadcast(CPUX86State *env) { int family = 0; int model = 0; @@ -191,7 +191,7 @@ static const char *cc_op_str[] = { }; static void -cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf, +cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf, const char *name, struct SegmentCache *sc) { #ifdef TARGET_X86_64 @@ -248,7 +248,7 @@ done: #define DUMP_CODE_BYTES_TOTAL 50 #define DUMP_CODE_BYTES_BACKWARD 20 -void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf, +void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf, int flags) { int eflags, i, nb; @@ -857,7 +857,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, return 1; } -target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) +target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr) { target_ulong pde_addr, pte_addr; uint64_t pte; @@ -952,7 +952,7 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr) return paddr; } -void hw_breakpoint_insert(CPUState *env, int index) +void hw_breakpoint_insert(CPUX86State *env, int index) { int type, err = 0; @@ -980,7 +980,7 @@ void hw_breakpoint_insert(CPUState *env, int index) env->cpu_breakpoint[index] = NULL; } -void hw_breakpoint_remove(CPUState *env, int index) +void hw_breakpoint_remove(CPUX86State *env, int index) { if (!env->cpu_breakpoint[index]) return; @@ -999,7 +999,7 @@ void hw_breakpoint_remove(CPUState *env, int index) } } -int check_hw_breakpoints(CPUState *env, int force_dr6_update) +int check_hw_breakpoints(CPUX86State *env, int force_dr6_update) { target_ulong dr6; int reg, type; @@ -1023,7 +1023,7 @@ int check_hw_breakpoints(CPUState *env, int force_dr6_update) static CPUDebugExcpHandler *prev_debug_excp_handler; -static void breakpoint_handler(CPUState *env) +static void breakpoint_handler(CPUX86State *env) { CPUBreakpoint *bp; @@ -1051,7 +1051,7 @@ static void breakpoint_handler(CPUState *env) typedef struct MCEInjectionParams { Monitor *mon; - CPUState *env; + CPUX86State *env; int bank; uint64_t status; uint64_t mcg_status; @@ -1063,7 +1063,7 @@ typedef 
struct MCEInjectionParams { static void do_inject_x86_mce(void *data) { MCEInjectionParams *params = data; - CPUState *cenv = params->env; + CPUX86State *cenv = params->env; uint64_t *banks = cenv->mce_banks + 4 * params->bank; cpu_synchronize_state(cenv); @@ -1133,7 +1133,7 @@ static void do_inject_x86_mce(void *data) } } -void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank, +void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags) { @@ -1148,7 +1148,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank, .flags = flags, }; unsigned bank_num = cenv->mcg_cap & 0xff; - CPUState *env; + CPUX86State *env; if (!cenv->mcg_cap) { monitor_printf(mon, "MCE injection not supported\n"); @@ -1185,7 +1185,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank, } } -void cpu_report_tpr_access(CPUState *env, TPRAccess access) +void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) { TranslationBlock *tb; @@ -1277,7 +1277,7 @@ CPUX86State *cpu_x86_init(const char *cpu_model) } #if !defined(CONFIG_USER_ONLY) -void do_cpu_init(CPUState *env) +void do_cpu_init(CPUX86State *env) { int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI; uint64_t pat = env->pat; @@ -1289,15 +1289,15 @@ void do_cpu_init(CPUState *env) env->halted = !cpu_is_bsp(env); } -void do_cpu_sipi(CPUState *env) +void do_cpu_sipi(CPUX86State *env) { apic_sipi(env->apic_state); } #else -void do_cpu_init(CPUState *env) +void do_cpu_init(CPUX86State *env) { } -void do_cpu_sipi(CPUState *env) +void do_cpu_sipi(CPUX86State *env) { } #endif diff --git a/target-i386/kvm.c b/target-i386/kvm.c index 619d773..e74a9e4 100644 --- a/target-i386/kvm.c +++ b/target-i386/kvm.c @@ -221,7 +221,7 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap, return -ENOSYS; } -static void kvm_mce_inject(CPUState *env, target_phys_addr_t paddr, int code) +static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code) { uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S; @@ -246,7 +246,7 @@ static void hardware_memory_error(void) exit(1); } -int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr) +int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr) { ram_addr_t ram_addr; target_phys_addr_t paddr; @@ -306,7 +306,7 @@ int kvm_arch_on_sigbus(int code, void *addr) return 0; } -static int kvm_inject_mce_oldstyle(CPUState *env) +static int kvm_inject_mce_oldstyle(CPUX86State *env) { if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) { unsigned int bank, bank_num = env->mcg_cap & 0xff; @@ -338,14 +338,14 @@ static int kvm_inject_mce_oldstyle(CPUState *env) static void cpu_update_state(void *opaque, int running, RunState state) { - CPUState *env = opaque; + CPUX86State *env = opaque; if (running) { env->tsc_valid = false; } } -int kvm_arch_init_vcpu(CPUState *env) +int kvm_arch_init_vcpu(CPUX86State *env) { struct { struct kvm_cpuid2 cpuid; @@ -577,7 +577,7 @@ int kvm_arch_init_vcpu(CPUState *env) return 0; } -void kvm_arch_reset_vcpu(CPUState *env) +void kvm_arch_reset_vcpu(CPUX86State *env) { env->exception_injected = -1; env->interrupt_injected = -1; @@ -768,7 +768,7 @@ static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) } } -static int kvm_getput_regs(CPUState *env, int set) +static int kvm_getput_regs(CPUX86State *env, int set) { struct kvm_regs regs; int ret = 0; @@ 
-809,7 +809,7 @@ static int kvm_getput_regs(CPUState *env, int set) return ret; } -static int kvm_put_fpu(CPUState *env) +static int kvm_put_fpu(CPUX86State *env) { struct kvm_fpu fpu; int i; @@ -841,7 +841,7 @@ static int kvm_put_fpu(CPUState *env) #define XSAVE_XSTATE_BV 128 #define XSAVE_YMMH_SPACE 144 -static int kvm_put_xsave(CPUState *env) +static int kvm_put_xsave(CPUX86State *env) { struct kvm_xsave* xsave = env->kvm_xsave_buf; uint16_t cwd, swd, twd; @@ -875,7 +875,7 @@ static int kvm_put_xsave(CPUState *env) return r; } -static int kvm_put_xcrs(CPUState *env) +static int kvm_put_xcrs(CPUX86State *env) { struct kvm_xcrs xcrs; @@ -890,7 +890,7 @@ static int kvm_put_xcrs(CPUState *env) return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs); } -static int kvm_put_sregs(CPUState *env) +static int kvm_put_sregs(CPUX86State *env) { struct kvm_sregs sregs; @@ -946,7 +946,7 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry, entry->data = value; } -static int kvm_put_msrs(CPUState *env, int level) +static int kvm_put_msrs(CPUX86State *env, int level) { struct { struct kvm_msrs info; @@ -1029,7 +1029,7 @@ static int kvm_put_msrs(CPUState *env, int level) } -static int kvm_get_fpu(CPUState *env) +static int kvm_get_fpu(CPUX86State *env) { struct kvm_fpu fpu; int i, ret; @@ -1055,7 +1055,7 @@ static int kvm_get_fpu(CPUState *env) return 0; } -static int kvm_get_xsave(CPUState *env) +static int kvm_get_xsave(CPUX86State *env) { struct kvm_xsave* xsave = env->kvm_xsave_buf; int ret, i; @@ -1093,7 +1093,7 @@ static int kvm_get_xsave(CPUState *env) return 0; } -static int kvm_get_xcrs(CPUState *env) +static int kvm_get_xcrs(CPUX86State *env) { int i, ret; struct kvm_xcrs xcrs; @@ -1117,7 +1117,7 @@ static int kvm_get_xcrs(CPUState *env) return 0; } -static int kvm_get_sregs(CPUState *env) +static int kvm_get_sregs(CPUX86State *env) { struct kvm_sregs sregs; uint32_t hflags; @@ -1201,7 +1201,7 @@ static int kvm_get_sregs(CPUState *env) return 0; } -static int kvm_get_msrs(CPUState *env) +static int kvm_get_msrs(CPUX86State *env) { struct { struct kvm_msrs info; @@ -1331,14 +1331,14 @@ static int kvm_get_msrs(CPUState *env) return 0; } -static int kvm_put_mp_state(CPUState *env) +static int kvm_put_mp_state(CPUX86State *env) { struct kvm_mp_state mp_state = { .mp_state = env->mp_state }; return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state); } -static int kvm_get_mp_state(CPUState *env) +static int kvm_get_mp_state(CPUX86State *env) { struct kvm_mp_state mp_state; int ret; @@ -1354,7 +1354,7 @@ static int kvm_get_mp_state(CPUState *env) return 0; } -static int kvm_get_apic(CPUState *env) +static int kvm_get_apic(CPUX86State *env) { DeviceState *apic = env->apic_state; struct kvm_lapic_state kapic; @@ -1371,7 +1371,7 @@ static int kvm_get_apic(CPUState *env) return 0; } -static int kvm_put_apic(CPUState *env) +static int kvm_put_apic(CPUX86State *env) { DeviceState *apic = env->apic_state; struct kvm_lapic_state kapic; @@ -1384,7 +1384,7 @@ static int kvm_put_apic(CPUState *env) return 0; } -static int kvm_put_vcpu_events(CPUState *env, int level) +static int kvm_put_vcpu_events(CPUX86State *env, int level) { struct kvm_vcpu_events events; @@ -1418,7 +1418,7 @@ static int kvm_put_vcpu_events(CPUState *env, int level) return kvm_vcpu_ioctl(env, KVM_SET_VCPU_EVENTS, &events); } -static int kvm_get_vcpu_events(CPUState *env) +static int kvm_get_vcpu_events(CPUX86State *env) { struct kvm_vcpu_events events; int ret; @@ -1453,7 +1453,7 @@ static int kvm_get_vcpu_events(CPUState *env) return 0; } 
-static int kvm_guest_debug_workarounds(CPUState *env) +static int kvm_guest_debug_workarounds(CPUX86State *env) { int ret = 0; unsigned long reinject_trap = 0; @@ -1482,7 +1482,7 @@ static int kvm_guest_debug_workarounds(CPUState *env) return ret; } -static int kvm_put_debugregs(CPUState *env) +static int kvm_put_debugregs(CPUX86State *env) { struct kvm_debugregs dbgregs; int i; @@ -1501,7 +1501,7 @@ static int kvm_put_debugregs(CPUState *env) return kvm_vcpu_ioctl(env, KVM_SET_DEBUGREGS, &dbgregs); } -static int kvm_get_debugregs(CPUState *env) +static int kvm_get_debugregs(CPUX86State *env) { struct kvm_debugregs dbgregs; int i, ret; @@ -1523,7 +1523,7 @@ static int kvm_get_debugregs(CPUState *env) return 0; } -int kvm_arch_put_registers(CPUState *env, int level) +int kvm_arch_put_registers(CPUX86State *env, int level) { int ret; @@ -1580,7 +1580,7 @@ int kvm_arch_put_registers(CPUState *env, int level) return 0; } -int kvm_arch_get_registers(CPUState *env) +int kvm_arch_get_registers(CPUX86State *env) { int ret; @@ -1625,7 +1625,7 @@ int kvm_arch_get_registers(CPUState *env) return 0; } -void kvm_arch_pre_run(CPUState *env, struct kvm_run *run) +void kvm_arch_pre_run(CPUX86State *env, struct kvm_run *run) { int ret; @@ -1685,7 +1685,7 @@ void kvm_arch_pre_run(CPUState *env, struct kvm_run *run) } } -void kvm_arch_post_run(CPUState *env, struct kvm_run *run) +void kvm_arch_post_run(CPUX86State *env, struct kvm_run *run) { if (run->if_flag) { env->eflags |= IF_MASK; @@ -1696,7 +1696,7 @@ void kvm_arch_post_run(CPUState *env, struct kvm_run *run) cpu_set_apic_base(env->apic_state, run->apic_base); } -int kvm_arch_process_async_events(CPUState *env) +int kvm_arch_process_async_events(CPUX86State *env) { if (env->interrupt_request & CPU_INTERRUPT_MCE) { /* We must not raise CPU_INTERRUPT_MCE if it's not supported. 
*/ @@ -1748,7 +1748,7 @@ int kvm_arch_process_async_events(CPUState *env) return env->halted; } -static int kvm_handle_halt(CPUState *env) +static int kvm_handle_halt(CPUX86State *env) { if (!((env->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && @@ -1760,7 +1760,7 @@ static int kvm_handle_halt(CPUState *env) return 0; } -static int kvm_handle_tpr_access(CPUState *env) +static int kvm_handle_tpr_access(CPUX86State *env) { struct kvm_run *run = env->kvm_run; @@ -1770,7 +1770,7 @@ static int kvm_handle_tpr_access(CPUState *env) return 1; } -int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) +int kvm_arch_insert_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp) { static const uint8_t int3 = 0xcc; @@ -1781,7 +1781,7 @@ int kvm_arch_insert_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) return 0; } -int kvm_arch_remove_sw_breakpoint(CPUState *env, struct kvm_sw_breakpoint *bp) +int kvm_arch_remove_sw_breakpoint(CPUX86State *env, struct kvm_sw_breakpoint *bp) { uint8_t int3; @@ -1924,7 +1924,7 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info) return ret; } -void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg) +void kvm_arch_update_guest_debug(CPUX86State *env, struct kvm_guest_debug *dbg) { const uint8_t type_code[] = { [GDB_BREAKPOINT_HW] = 0x0, @@ -1961,7 +1961,7 @@ static bool host_supports_vmx(void) #define VMX_INVALID_GUEST_STATE 0x80000021 -int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run) +int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run) { uint64_t code; int ret; @@ -2012,7 +2012,7 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run) return ret; } -bool kvm_arch_stop_on_emulation_error(CPUState *env) +bool kvm_arch_stop_on_emulation_error(CPUX86State *env) { kvm_cpu_synchronize_state(env); return !(env->cr[0] & CR0_PE_MASK) || diff --git a/target-i386/machine.c b/target-i386/machine.c index d6e98ff..a8be058 100644 --- a/target-i386/machine.c +++ b/target-i386/machine.c @@ -171,14 +171,14 @@ static const VMStateInfo vmstate_fpreg_1_no_mmx = { static bool fpregs_is_0(void *opaque, int version_id) { - CPUState *env = opaque; + CPUX86State *env = opaque; return (env->fpregs_format_vmstate == 0); } static bool fpregs_is_1_mmx(void *opaque, int version_id) { - CPUState *env = opaque; + CPUX86State *env = opaque; int guess_mmx; guess_mmx = ((env->fptag_vmstate == 0xff) && @@ -188,7 +188,7 @@ static bool fpregs_is_1_mmx(void *opaque, int version_id) static bool fpregs_is_1_no_mmx(void *opaque, int version_id) { - CPUState *env = opaque; + CPUX86State *env = opaque; int guess_mmx; guess_mmx = ((env->fptag_vmstate == 0xff) && @@ -237,7 +237,7 @@ static const VMStateInfo vmstate_hack_uint64_as_uint32 = { static void cpu_pre_save(void *opaque) { - CPUState *env = opaque; + CPUX86State *env = opaque; int i; /* FPU */ @@ -252,7 +252,7 @@ static void cpu_pre_save(void *opaque) static int cpu_post_load(void *opaque, int version_id) { - CPUState *env = opaque; + CPUX86State *env = opaque; int i; /* XXX: restore FPU round state */ @@ -274,7 +274,7 @@ static int cpu_post_load(void *opaque, int version_id) static bool async_pf_msr_needed(void *opaque) { - CPUState *cpu = opaque; + CPUX86State *cpu = opaque; return cpu->async_pf_en_msr != 0; } @@ -285,14 +285,14 @@ static const VMStateDescription vmstate_async_pf_msr = { .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { - VMSTATE_UINT64(async_pf_en_msr, CPUState), + 
VMSTATE_UINT64(async_pf_en_msr, CPUX86State), VMSTATE_END_OF_LIST() } }; static bool fpop_ip_dp_needed(void *opaque) { - CPUState *env = opaque; + CPUX86State *env = opaque; return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0; } @@ -303,16 +303,16 @@ static const VMStateDescription vmstate_fpop_ip_dp = { .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { - VMSTATE_UINT16(fpop, CPUState), - VMSTATE_UINT64(fpip, CPUState), - VMSTATE_UINT64(fpdp, CPUState), + VMSTATE_UINT16(fpop, CPUX86State), + VMSTATE_UINT64(fpip, CPUX86State), + VMSTATE_UINT64(fpdp, CPUX86State), VMSTATE_END_OF_LIST() } }; static bool tscdeadline_needed(void *opaque) { - CPUState *env = opaque; + CPUX86State *env = opaque; return env->tsc_deadline != 0; } @@ -323,14 +323,14 @@ static const VMStateDescription vmstate_msr_tscdeadline = { .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { - VMSTATE_UINT64(tsc_deadline, CPUState), + VMSTATE_UINT64(tsc_deadline, CPUX86State), VMSTATE_END_OF_LIST() } }; static bool misc_enable_needed(void *opaque) { - CPUState *env = opaque; + CPUX86State *env = opaque; return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT; } @@ -341,7 +341,7 @@ static const VMStateDescription vmstate_msr_ia32_misc_enable = { .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { - VMSTATE_UINT64(msr_ia32_misc_enable, CPUState), + VMSTATE_UINT64(msr_ia32_misc_enable, CPUX86State), VMSTATE_END_OF_LIST() } }; @@ -354,98 +354,98 @@ static const VMStateDescription vmstate_cpu = { .pre_save = cpu_pre_save, .post_load = cpu_post_load, .fields = (VMStateField []) { - VMSTATE_UINTTL_ARRAY(regs, CPUState, CPU_NB_REGS), - VMSTATE_UINTTL(eip, CPUState), - VMSTATE_UINTTL(eflags, CPUState), - VMSTATE_UINT32(hflags, CPUState), + VMSTATE_UINTTL_ARRAY(regs, CPUX86State, CPU_NB_REGS), + VMSTATE_UINTTL(eip, CPUX86State), + VMSTATE_UINTTL(eflags, CPUX86State), + VMSTATE_UINT32(hflags, CPUX86State), /* FPU */ - VMSTATE_UINT16(fpuc, CPUState), - VMSTATE_UINT16(fpus_vmstate, CPUState), - VMSTATE_UINT16(fptag_vmstate, CPUState), - VMSTATE_UINT16(fpregs_format_vmstate, CPUState), - VMSTATE_FP_REGS(fpregs, CPUState, 8), + VMSTATE_UINT16(fpuc, CPUX86State), + VMSTATE_UINT16(fpus_vmstate, CPUX86State), + VMSTATE_UINT16(fptag_vmstate, CPUX86State), + VMSTATE_UINT16(fpregs_format_vmstate, CPUX86State), + VMSTATE_FP_REGS(fpregs, CPUX86State, 8), - VMSTATE_SEGMENT_ARRAY(segs, CPUState, 6), - VMSTATE_SEGMENT(ldt, CPUState), - VMSTATE_SEGMENT(tr, CPUState), - VMSTATE_SEGMENT(gdt, CPUState), - VMSTATE_SEGMENT(idt, CPUState), + VMSTATE_SEGMENT_ARRAY(segs, CPUX86State, 6), + VMSTATE_SEGMENT(ldt, CPUX86State), + VMSTATE_SEGMENT(tr, CPUX86State), + VMSTATE_SEGMENT(gdt, CPUX86State), + VMSTATE_SEGMENT(idt, CPUX86State), - VMSTATE_UINT32(sysenter_cs, CPUState), + VMSTATE_UINT32(sysenter_cs, CPUX86State), #ifdef TARGET_X86_64 /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */ - VMSTATE_HACK_UINT32(sysenter_esp, CPUState, less_than_7), - VMSTATE_HACK_UINT32(sysenter_eip, CPUState, less_than_7), - VMSTATE_UINTTL_V(sysenter_esp, CPUState, 7), - VMSTATE_UINTTL_V(sysenter_eip, CPUState, 7), + VMSTATE_HACK_UINT32(sysenter_esp, CPUX86State, less_than_7), + VMSTATE_HACK_UINT32(sysenter_eip, CPUX86State, less_than_7), + VMSTATE_UINTTL_V(sysenter_esp, CPUX86State, 7), + VMSTATE_UINTTL_V(sysenter_eip, CPUX86State, 7), #else - VMSTATE_UINTTL(sysenter_esp, CPUState), - VMSTATE_UINTTL(sysenter_eip, CPUState), + VMSTATE_UINTTL(sysenter_esp, 
CPUX86State), + VMSTATE_UINTTL(sysenter_eip, CPUX86State), #endif - VMSTATE_UINTTL(cr[0], CPUState), - VMSTATE_UINTTL(cr[2], CPUState), - VMSTATE_UINTTL(cr[3], CPUState), - VMSTATE_UINTTL(cr[4], CPUState), - VMSTATE_UINTTL_ARRAY(dr, CPUState, 8), + VMSTATE_UINTTL(cr[0], CPUX86State), + VMSTATE_UINTTL(cr[2], CPUX86State), + VMSTATE_UINTTL(cr[3], CPUX86State), + VMSTATE_UINTTL(cr[4], CPUX86State), + VMSTATE_UINTTL_ARRAY(dr, CPUX86State, 8), /* MMU */ - VMSTATE_INT32(a20_mask, CPUState), + VMSTATE_INT32(a20_mask, CPUX86State), /* XMM */ - VMSTATE_UINT32(mxcsr, CPUState), - VMSTATE_XMM_REGS(xmm_regs, CPUState, CPU_NB_REGS), + VMSTATE_UINT32(mxcsr, CPUX86State), + VMSTATE_XMM_REGS(xmm_regs, CPUX86State, CPU_NB_REGS), #ifdef TARGET_X86_64 - VMSTATE_UINT64(efer, CPUState), - VMSTATE_UINT64(star, CPUState), - VMSTATE_UINT64(lstar, CPUState), - VMSTATE_UINT64(cstar, CPUState), - VMSTATE_UINT64(fmask, CPUState), - VMSTATE_UINT64(kernelgsbase, CPUState), + VMSTATE_UINT64(efer, CPUX86State), + VMSTATE_UINT64(star, CPUX86State), + VMSTATE_UINT64(lstar, CPUX86State), + VMSTATE_UINT64(cstar, CPUX86State), + VMSTATE_UINT64(fmask, CPUX86State), + VMSTATE_UINT64(kernelgsbase, CPUX86State), #endif - VMSTATE_UINT32_V(smbase, CPUState, 4), + VMSTATE_UINT32_V(smbase, CPUX86State, 4), - VMSTATE_UINT64_V(pat, CPUState, 5), - VMSTATE_UINT32_V(hflags2, CPUState, 5), + VMSTATE_UINT64_V(pat, CPUX86State, 5), + VMSTATE_UINT32_V(hflags2, CPUX86State, 5), - VMSTATE_UINT32_TEST(halted, CPUState, version_is_5), - VMSTATE_UINT64_V(vm_hsave, CPUState, 5), - VMSTATE_UINT64_V(vm_vmcb, CPUState, 5), - VMSTATE_UINT64_V(tsc_offset, CPUState, 5), - VMSTATE_UINT64_V(intercept, CPUState, 5), - VMSTATE_UINT16_V(intercept_cr_read, CPUState, 5), - VMSTATE_UINT16_V(intercept_cr_write, CPUState, 5), - VMSTATE_UINT16_V(intercept_dr_read, CPUState, 5), - VMSTATE_UINT16_V(intercept_dr_write, CPUState, 5), - VMSTATE_UINT32_V(intercept_exceptions, CPUState, 5), - VMSTATE_UINT8_V(v_tpr, CPUState, 5), + VMSTATE_UINT32_TEST(halted, CPUX86State, version_is_5), + VMSTATE_UINT64_V(vm_hsave, CPUX86State, 5), + VMSTATE_UINT64_V(vm_vmcb, CPUX86State, 5), + VMSTATE_UINT64_V(tsc_offset, CPUX86State, 5), + VMSTATE_UINT64_V(intercept, CPUX86State, 5), + VMSTATE_UINT16_V(intercept_cr_read, CPUX86State, 5), + VMSTATE_UINT16_V(intercept_cr_write, CPUX86State, 5), + VMSTATE_UINT16_V(intercept_dr_read, CPUX86State, 5), + VMSTATE_UINT16_V(intercept_dr_write, CPUX86State, 5), + VMSTATE_UINT32_V(intercept_exceptions, CPUX86State, 5), + VMSTATE_UINT8_V(v_tpr, CPUX86State, 5), /* MTRRs */ - VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUState, 11, 8), - VMSTATE_UINT64_V(mtrr_deftype, CPUState, 8), - VMSTATE_MTRR_VARS(mtrr_var, CPUState, 8, 8), + VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUX86State, 11, 8), + VMSTATE_UINT64_V(mtrr_deftype, CPUX86State, 8), + VMSTATE_MTRR_VARS(mtrr_var, CPUX86State, 8, 8), /* KVM-related states */ - VMSTATE_INT32_V(interrupt_injected, CPUState, 9), - VMSTATE_UINT32_V(mp_state, CPUState, 9), - VMSTATE_UINT64_V(tsc, CPUState, 9), - VMSTATE_INT32_V(exception_injected, CPUState, 11), - VMSTATE_UINT8_V(soft_interrupt, CPUState, 11), - VMSTATE_UINT8_V(nmi_injected, CPUState, 11), - VMSTATE_UINT8_V(nmi_pending, CPUState, 11), - VMSTATE_UINT8_V(has_error_code, CPUState, 11), - VMSTATE_UINT32_V(sipi_vector, CPUState, 11), + VMSTATE_INT32_V(interrupt_injected, CPUX86State, 9), + VMSTATE_UINT32_V(mp_state, CPUX86State, 9), + VMSTATE_UINT64_V(tsc, CPUX86State, 9), + VMSTATE_INT32_V(exception_injected, CPUX86State, 11), + VMSTATE_UINT8_V(soft_interrupt, 
CPUX86State, 11), + VMSTATE_UINT8_V(nmi_injected, CPUX86State, 11), + VMSTATE_UINT8_V(nmi_pending, CPUX86State, 11), + VMSTATE_UINT8_V(has_error_code, CPUX86State, 11), + VMSTATE_UINT32_V(sipi_vector, CPUX86State, 11), /* MCE */ - VMSTATE_UINT64_V(mcg_cap, CPUState, 10), - VMSTATE_UINT64_V(mcg_status, CPUState, 10), - VMSTATE_UINT64_V(mcg_ctl, CPUState, 10), - VMSTATE_UINT64_ARRAY_V(mce_banks, CPUState, MCE_BANKS_DEF *4, 10), + VMSTATE_UINT64_V(mcg_cap, CPUX86State, 10), + VMSTATE_UINT64_V(mcg_status, CPUX86State, 10), + VMSTATE_UINT64_V(mcg_ctl, CPUX86State, 10), + VMSTATE_UINT64_ARRAY_V(mce_banks, CPUX86State, MCE_BANKS_DEF *4, 10), /* rdtscp */ - VMSTATE_UINT64_V(tsc_aux, CPUState, 11), + VMSTATE_UINT64_V(tsc_aux, CPUX86State, 11), /* KVM pvclock msr */ - VMSTATE_UINT64_V(system_time_msr, CPUState, 11), - VMSTATE_UINT64_V(wall_clock_msr, CPUState, 11), + VMSTATE_UINT64_V(system_time_msr, CPUX86State, 11), + VMSTATE_UINT64_V(wall_clock_msr, CPUX86State, 11), /* XSAVE related fields */ - VMSTATE_UINT64_V(xcr0, CPUState, 12), - VMSTATE_UINT64_V(xstate_bv, CPUState, 12), - VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12), + VMSTATE_UINT64_V(xcr0, CPUX86State, 12), + VMSTATE_UINT64_V(xstate_bv, CPUX86State, 12), + VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUX86State, CPU_NB_REGS, 12), VMSTATE_END_OF_LIST() /* The above list is not sorted /wrt version numbers, watch out! */ }, diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c index 63a08d6..c04ae44 100644 --- a/target-i386/op_helper.c +++ b/target-i386/op_helper.c @@ -125,7 +125,7 @@ static inline void load_eflags(int eflags, int update_mask) /* load efer and update the corresponding hflags. XXX: do consistency checks with cpuid bits ? */ -static inline void cpu_load_efer(CPUState *env, uint64_t val) +static inline void cpu_load_efer(CPUX86State *env, uint64_t val) { env->efer = val; env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); @@ -1376,9 +1376,9 @@ static void do_interrupt_all(int intno, int is_int, int error_code, #endif } -void do_interrupt(CPUState *env1) +void do_interrupt(CPUX86State *env1) { - CPUState *saved_env; + CPUX86State *saved_env; saved_env = env; env = env1; @@ -1406,9 +1406,9 @@ void do_interrupt(CPUState *env1) env = saved_env; } -void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw) +void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw) { - CPUState *saved_env; + CPUX86State *saved_env; saved_env = env; env = env1; @@ -1492,7 +1492,7 @@ static void QEMU_NORETURN raise_exception_err(int exception_index, raise_interrupt(exception_index, 0, error_code, 0); } -void raise_exception_err_env(CPUState *nenv, int exception_index, +void raise_exception_err_env(CPUX86State *nenv, int exception_index, int error_code) { env = nenv; @@ -1504,7 +1504,7 @@ static void QEMU_NORETURN raise_exception(int exception_index) raise_interrupt(exception_index, 0, 0, 0); } -void raise_exception_env(int exception_index, CPUState *nenv) +void raise_exception_env(int exception_index, CPUX86State *nenv) { env = nenv; raise_exception(exception_index); @@ -1513,7 +1513,7 @@ void raise_exception_env(int exception_index, CPUState *nenv) #if defined(CONFIG_USER_ONLY) -void do_smm_enter(CPUState *env1) +void do_smm_enter(CPUX86State *env1) { } @@ -1529,12 +1529,12 @@ void helper_rsm(void) #define SMM_REVISION_ID 0x00020000 #endif -void do_smm_enter(CPUState *env1) +void do_smm_enter(CPUX86State *env1) { target_ulong sm_state; SegmentCache *dt; int i, offset; - CPUState *saved_env; + CPUX86State *saved_env; 
saved_env = env; env = env1; @@ -5002,7 +5002,7 @@ void helper_boundl(target_ulong a0, int v) NULL, it means that the function was called in C code (i.e. not from generated code or from helper.c) */ /* XXX: fix it to restore all registers */ -void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx, +void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx, void *retaddr) { TranslationBlock *tb; @@ -5066,7 +5066,7 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param) { } -void svm_check_intercept(CPUState *env1, uint32_t type) +void svm_check_intercept(CPUX86State *env1, uint32_t type) { } @@ -5101,7 +5101,7 @@ static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc) } static inline void svm_load_seg_cache(target_phys_addr_t addr, - CPUState *env, int seg_reg) + CPUX86State *env, int seg_reg) { SegmentCache sc1, *sc = &sc1; svm_load_seg(addr, sc); @@ -5460,9 +5460,9 @@ void helper_svm_check_intercept_param(uint32_t type, uint64_t param) } } -void svm_check_intercept(CPUState *env1, uint32_t type) +void svm_check_intercept(CPUX86State *env1, uint32_t type) { - CPUState *saved_env; + CPUX86State *saved_env; saved_env = env; env = env1; @@ -5840,9 +5840,9 @@ uint32_t helper_cc_compute_all(int op) } } -uint32_t cpu_cc_compute_all(CPUState *env1, int op) +uint32_t cpu_cc_compute_all(CPUX86State *env1, int op) { - CPUState *saved_env; + CPUX86State *saved_env; uint32_t ret; saved_env = env; diff --git a/target-i386/translate.c b/target-i386/translate.c index 860b4a3..c1ede1a 100644 --- a/target-i386/translate.c +++ b/target-i386/translate.c @@ -388,7 +388,7 @@ static inline void gen_op_addl_T0_T1(void) static inline void gen_op_jmp_T0(void) { - tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip)); + tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip)); } static inline void gen_op_add_reg_im(int size, int reg, int32_t val) @@ -453,12 +453,12 @@ static inline void gen_op_addl_A0_reg_sN(int shift, int reg) static inline void gen_op_movl_A0_seg(int reg) { - tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET); + tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET); } static inline void gen_op_addl_A0_seg(int reg) { - tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base)); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); #ifdef TARGET_X86_64 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff); @@ -468,12 +468,12 @@ static inline void gen_op_addl_A0_seg(int reg) #ifdef TARGET_X86_64 static inline void gen_op_movq_A0_seg(int reg) { - tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base)); + tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base)); } static inline void gen_op_addq_A0_seg(int reg) { - tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base)); + tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base)); tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0); } @@ -583,7 +583,7 @@ static inline void gen_op_st_T1_A0(int idx) static inline void gen_jmp_im(target_ulong pc) { tcg_gen_movi_tl(cpu_tmp0, pc); - tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip)); + tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip)); } static inline void gen_string_movl_A0_ESI(DisasContext *s) @@ -644,7 +644,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) static inline void gen_op_movl_T0_Dshift(int ot) { - 
tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df)); + tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df)); tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot); }; @@ -6466,11 +6466,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start) break; case 0xfc: /* cld */ tcg_gen_movi_i32(cpu_tmp2_i32, 1); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); + tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; case 0xfd: /* std */ tcg_gen_movi_i32(cpu_tmp2_i32, -1); - tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df)); + tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df)); break; /************************/ @@ -7645,64 +7645,64 @@ void optimize_flags_init(void) { cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, cc_op), "cc_op"); - cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src), + offsetof(CPUX86State, cc_op), "cc_op"); + cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src), "cc_src"); - cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst), + cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst), "cc_dst"); - cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp), + cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp), "cc_tmp"); #ifdef TARGET_X86_64 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_EAX]), "rax"); + offsetof(CPUX86State, regs[R_EAX]), "rax"); cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_ECX]), "rcx"); + offsetof(CPUX86State, regs[R_ECX]), "rcx"); cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_EDX]), "rdx"); + offsetof(CPUX86State, regs[R_EDX]), "rdx"); cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_EBX]), "rbx"); + offsetof(CPUX86State, regs[R_EBX]), "rbx"); cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_ESP]), "rsp"); + offsetof(CPUX86State, regs[R_ESP]), "rsp"); cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_EBP]), "rbp"); + offsetof(CPUX86State, regs[R_EBP]), "rbp"); cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_ESI]), "rsi"); + offsetof(CPUX86State, regs[R_ESI]), "rsi"); cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[R_EDI]), "rdi"); + offsetof(CPUX86State, regs[R_EDI]), "rdi"); cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[8]), "r8"); + offsetof(CPUX86State, regs[8]), "r8"); cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[9]), "r9"); + offsetof(CPUX86State, regs[9]), "r9"); cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[10]), "r10"); + offsetof(CPUX86State, regs[10]), "r10"); cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[11]), "r11"); + offsetof(CPUX86State, regs[11]), "r11"); cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[12]), "r12"); + offsetof(CPUX86State, regs[12]), "r12"); cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[13]), "r13"); + offsetof(CPUX86State, regs[13]), "r13"); cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[14]), "r14"); + offsetof(CPUX86State, regs[14]), "r14"); cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0, - offsetof(CPUState, regs[15]), 
"r15"); + offsetof(CPUX86State, regs[15]), "r15"); #else cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_EAX]), "eax"); + offsetof(CPUX86State, regs[R_EAX]), "eax"); cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_ECX]), "ecx"); + offsetof(CPUX86State, regs[R_ECX]), "ecx"); cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_EDX]), "edx"); + offsetof(CPUX86State, regs[R_EDX]), "edx"); cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_EBX]), "ebx"); + offsetof(CPUX86State, regs[R_EBX]), "ebx"); cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_ESP]), "esp"); + offsetof(CPUX86State, regs[R_ESP]), "esp"); cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_EBP]), "ebp"); + offsetof(CPUX86State, regs[R_EBP]), "ebp"); cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_ESI]), "esi"); + offsetof(CPUX86State, regs[R_ESI]), "esi"); cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0, - offsetof(CPUState, regs[R_EDI]), "edi"); + offsetof(CPUX86State, regs[R_EDI]), "edi"); #endif /* register helpers */ @@ -7713,7 +7713,7 @@ void optimize_flags_init(void) /* generate intermediate code in gen_opc_buf and gen_opparam_buf for basic block 'tb'. If search_pc is TRUE, also generate PC information for each intermediate instruction. */ -static inline void gen_intermediate_code_internal(CPUState *env, +static inline void gen_intermediate_code_internal(CPUX86State *env, TranslationBlock *tb, int search_pc) { @@ -7890,17 +7890,17 @@ static inline void gen_intermediate_code_internal(CPUState *env, } } -void gen_intermediate_code(CPUState *env, TranslationBlock *tb) +void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb) { gen_intermediate_code_internal(env, tb, 0); } -void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb) +void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb) { gen_intermediate_code_internal(env, tb, 1); } -void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos) +void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos) { int cc_op; #ifdef DEBUG_DISAS -- 1.7.7 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html