We want all accelerators to share the same opaque pointer in
CPUState. Rename the 'hvf_vcpu_state' structure as 'AccelvCPUState'.

Use the generic 'accel' field of CPUState instead of 'hvf'.

Replace g_malloc0() by g_new0() for readability.

Signed-off-by: Philippe Mathieu-Daudé <philmd@xxxxxxxxxx>
---
 include/hw/core/cpu.h     |  3 --
 include/sysemu/hvf_int.h  |  2 +-
 accel/hvf/hvf-accel-ops.c | 16 ++++-----
 target/arm/hvf/hvf.c      | 70 +++++++++++++++++++--------------
 4 files changed, 44 insertions(+), 47 deletions(-)
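
Purely illustrative, not part of the change itself: below is a minimal
standalone sketch of the opaque-pointer pattern described above, using
made-up names (ToyCPUState, toy_init_vcpu) rather than the real QEMU types
and assuming only GLib. The core header sees just a forward declaration of
struct AccelvCPUState; the accelerator defines the layout privately and
allocates it with g_new0(), which returns an already-typed pointer instead
of the void * returned by g_malloc0().

    /* Standalone illustration, not QEMU code. Build with:
     *   cc sketch.c $(pkg-config --cflags --libs glib-2.0)
     */
    #include <glib.h>
    #include <inttypes.h>
    #include <signal.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* What the core header sees: only an opaque forward declaration. */
    struct AccelvCPUState;

    typedef struct ToyCPUState {        /* stand-in for CPUState */
        struct AccelvCPUState *accel;   /* generic per-vCPU accelerator data */
    } ToyCPUState;

    /* What the accelerator's private header defines (hvf_int.h here). */
    struct AccelvCPUState {
        uint64_t fd;
        void *exit;
        bool vtimer_masked;
        sigset_t unblock_ipi_mask;
    };

    static void toy_init_vcpu(ToyCPUState *cpu)
    {
        /* g_new0() zero-allocates one struct and returns a typed pointer,
         * so no sizeof(*cpu->hvf) expression is needed as with g_malloc0(). */
        cpu->accel = g_new0(struct AccelvCPUState, 1);
        cpu->accel->fd = 42;            /* toy value, not a real vCPU handle */
    }

    int main(void)
    {
        ToyCPUState cpu = { 0 };

        toy_init_vcpu(&cpu);
        g_print("fd=%" PRIu64 " masked=%d\n",
                cpu.accel->fd, cpu.accel->vtimer_masked);
        g_free(cpu.accel);
        return 0;
    }
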
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 8d27861ed5..1dc5efe650 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -236,7 +236,6 @@ typedef struct SavedIOTLB {
 struct KVMState;
 struct kvm_run;
 struct AccelvCPUState;
-struct hvf_vcpu_state;
 
 /* work queue */
 
@@ -442,8 +441,6 @@ struct CPUState {
     /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
     bool prctl_unalign_sigbus;
 
-    struct hvf_vcpu_state *hvf;
-
     /* track IOMMUs whose translations we've cached in the TCG TLB */
     GArray *iommu_notifiers;
 };
diff --git a/include/sysemu/hvf_int.h b/include/sysemu/hvf_int.h
index 6545f7cd61..96ef51f4df 100644
--- a/include/sysemu/hvf_int.h
+++ b/include/sysemu/hvf_int.h
@@ -48,7 +48,7 @@ struct HVFState {
 };
 extern HVFState *hvf_state;
 
-struct hvf_vcpu_state {
+struct AccelvCPUState {
     uint64_t fd;
     void *exit;
     bool vtimer_masked;
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 24913ca9c4..06ca1d59a4 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -363,19 +363,19 @@ type_init(hvf_type_init);
 
 static void hvf_vcpu_destroy(CPUState *cpu)
 {
-    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
+    hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
     assert_hvf_ok(ret);
 
     hvf_arch_vcpu_destroy(cpu);
-    g_free(cpu->hvf);
-    cpu->hvf = NULL;
+    g_free(cpu->accel);
+    cpu->accel = NULL;
 }
 
 static int hvf_init_vcpu(CPUState *cpu)
 {
     int r;
 
-    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
+    cpu->accel = g_new0(struct AccelvCPUState, 1);
 
     /* init cpu signals */
     struct sigaction sigact;
@@ -384,13 +384,13 @@ static int hvf_init_vcpu(CPUState *cpu)
     sigact.sa_handler = dummy_signal;
     sigaction(SIG_IPI, &sigact, NULL);
 
-    pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
-    sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
+    sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
 
 #ifdef __aarch64__
-    r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
+    r = hv_vcpu_create(&cpu->accel->fd, (hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
 #else
-    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
+    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->accel->fd, HV_VCPU_DEFAULT);
 #endif
     cpu->vcpu_dirty = 1;
     assert_hvf_ok(r);
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index ad65603445..b85648b61c 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -366,29 +366,29 @@ int hvf_get_registers(CPUState *cpu)
     int i;
 
     for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
-        ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
+        ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val);
         *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
         assert_hvf_ok(ret);
     }
 
     for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
-        ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+        ret = hv_vcpu_get_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                       &fpval);
         memcpy((void *)env + hvf_fpreg_match[i].offset,
                &fpval, sizeof(fpval));
         assert_hvf_ok(ret);
     }
 
     val = 0;
-    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
+    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPCR, &val);
     assert_hvf_ok(ret);
     vfp_set_fpcr(env, val);
 
     val = 0;
-    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
+    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_FPSR, &val);
     assert_hvf_ok(ret);
     vfp_set_fpsr(env, val);
 
-    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
+    ret = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_CPSR, &val);
     assert_hvf_ok(ret);
     pstate_write(env, val);
 
@@ -397,7 +397,7 @@ int hvf_get_registers(CPUState *cpu)
             continue;
         }
 
-        ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
+        ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val);
         assert_hvf_ok(ret);
 
         arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
@@ -420,24 +420,24 @@ int hvf_put_registers(CPUState *cpu)
 
     for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
         val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
-        ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
+        ret = hv_vcpu_set_reg(cpu->accel->fd, hvf_reg_match[i].reg, val);
         assert_hvf_ok(ret);
     }
 
     for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
         memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
-        ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
+        ret = hv_vcpu_set_simd_fp_reg(cpu->accel->fd, hvf_fpreg_match[i].reg,
                                       fpval);
         assert_hvf_ok(ret);
     }
 
-    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
+    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPCR, vfp_get_fpcr(env));
     assert_hvf_ok(ret);
 
-    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
+    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_FPSR, vfp_get_fpsr(env));
     assert_hvf_ok(ret);
 
-    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
+    ret = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_CPSR, pstate_read(env));
     assert_hvf_ok(ret);
 
     aarch64_save_sp(env, arm_current_el(env));
@@ -449,11 +449,11 @@ int hvf_put_registers(CPUState *cpu)
         }
 
         val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
-        ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
+        ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val);
         assert_hvf_ok(ret);
     }
 
-    ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
+    ret = hv_vcpu_set_vtimer_offset(cpu->accel->fd, hvf_state->vtimer_offset);
     assert_hvf_ok(ret);
 
     return 0;
@@ -474,7 +474,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
     flush_cpu_state(cpu);
 
     if (rt < 31) {
-        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
+        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_X0 + rt, val);
         assert_hvf_ok(r);
     }
 }
@@ -487,7 +487,7 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
     flush_cpu_state(cpu);
 
     if (rt < 31) {
-        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
+        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_X0 + rt, &val);
         assert_hvf_ok(r);
     }
 
@@ -629,22 +629,22 @@ int hvf_arch_init_vcpu(CPUState *cpu)
     assert(write_cpustate_to_list(arm_cpu, false));
 
     /* Set CP_NO_RAW system registers on init */
-    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
+    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MIDR_EL1,
                               arm_cpu->midr);
     assert_hvf_ok(ret);
 
-    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
+    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_MPIDR_EL1,
                               arm_cpu->mp_affinity);
     assert_hvf_ok(ret);
 
-    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
+    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
     assert_hvf_ok(ret);
 
     pfr |= env->gicv3state ? (1 << 24) : 0;
-    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
+    ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
     assert_hvf_ok(ret);
 
     /* We're limited to underlying hardware caps, override internal versions */
-    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
+    ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                               &arm_cpu->isar.id_aa64mmfr0);
     assert_hvf_ok(ret);
 
@@ -654,7 +654,7 @@ int hvf_arch_init_vcpu(CPUState *cpu)
 void hvf_kick_vcpu_thread(CPUState *cpu)
 {
     cpus_kick_thread(cpu);
-    hv_vcpus_exit(&cpu->hvf->fd, 1);
+    hv_vcpus_exit(&cpu->accel->fd, 1);
 }
 
 static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
@@ -1191,13 +1191,13 @@ static int hvf_inject_interrupts(CPUState *cpu)
 {
     if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
         trace_hvf_inject_fiq();
-        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
+        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                       true);
     }
 
     if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
         trace_hvf_inject_irq();
-        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
+        hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                       true);
     }
 
@@ -1231,7 +1231,7 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
      */
    qatomic_mb_set(&cpu->thread_kicked, false);
    qemu_mutex_unlock_iothread();
-    pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
+    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
    qemu_mutex_lock_iothread();
 }
 
@@ -1252,7 +1252,7 @@ static void hvf_wfi(CPUState *cpu)
         return;
     }
 
-    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
     assert_hvf_ok(r);
 
     if (!(ctl & 1) || (ctl & 2)) {
@@ -1261,7 +1261,7 @@ static void hvf_wfi(CPUState *cpu)
         return;
     }
 
-    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
+    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
     assert_hvf_ok(r);
 
     ticks_to_sleep = cval - hvf_vtimer_val();
@@ -1294,12 +1294,12 @@ static void hvf_sync_vtimer(CPUState *cpu)
     uint64_t ctl;
     bool irq_state;
 
-    if (!cpu->hvf->vtimer_masked) {
+    if (!cpu->accel->vtimer_masked) {
         /* We will get notified on vtimer changes by hvf, nothing to do */
         return;
     }
 
-    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
+    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
     assert_hvf_ok(r);
 
     irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
@@ -1308,8 +1308,8 @@ static void hvf_sync_vtimer(CPUState *cpu)
 
     if (!irq_state) {
         /* Timer no longer asserting, we can unmask it */
-        hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
-        cpu->hvf->vtimer_masked = false;
+        hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
+        cpu->accel->vtimer_masked = false;
     }
 }
 
@@ -1317,7 +1317,7 @@ int hvf_vcpu_exec(CPUState *cpu)
 {
     ARMCPU *arm_cpu = ARM_CPU(cpu);
     CPUARMState *env = &arm_cpu->env;
-    hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
+    hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
     hv_return_t r;
     bool advance_pc = false;
 
@@ -1332,7 +1332,7 @@ int hvf_vcpu_exec(CPUState *cpu)
         flush_cpu_state(cpu);
 
         qemu_mutex_unlock_iothread();
-        assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));
+        assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
 
         /* handle VMEXIT */
         uint64_t exit_reason = hvf_exit->reason;
@@ -1346,7 +1346,7 @@ int hvf_vcpu_exec(CPUState *cpu)
             break;
         case HV_EXIT_REASON_VTIMER_ACTIVATED:
             qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
-            cpu->hvf->vtimer_masked = true;
+            cpu->accel->vtimer_masked = true;
             return 0;
         case HV_EXIT_REASON_CANCELED:
             /* we got kicked, no exit to process */
@@ -1457,10 +1457,10 @@ int hvf_vcpu_exec(CPUState *cpu)
 
         flush_cpu_state(cpu);
 
-        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
+        r = hv_vcpu_get_reg(cpu->accel->fd, HV_REG_PC, &pc);
         assert_hvf_ok(r);
         pc += 4;
-        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
+        r = hv_vcpu_set_reg(cpu->accel->fd, HV_REG_PC, pc);
         assert_hvf_ok(r);
     }
 
-- 
2.38.1