This patch adds support for the Hyper-V Synthetic Interrupt Controller
(Hyper-V SynIC) MSRs and their migration. Hyper-V SynIC is enabled by the
CPU's 'hv-synic' option. The patch does not allow CPU creation if the
'hv-synic' option is specified but the kernel does not support Hyper-V
SynIC (see the standalone capability-probe sketch after the patch).

Changes v3:
* removed 'msr_hv_synic_version' from migration because its value is
  always the same
* moved SynIC MSR initialization into kvm_arch_init_vcpu

Signed-off-by: Andrey Smetanin <asmetanin@xxxxxxxxxxxxx>
Reviewed-by: Roman Kagan <rkagan@xxxxxxxxxxxxx>
Signed-off-by: Denis V. Lunev <den@xxxxxxxxxx>
CC: Paolo Bonzini <pbonzini@xxxxxxxxxx>
CC: Richard Henderson <rth@xxxxxxxxxxx>
CC: Eduardo Habkost <ehabkost@xxxxxxxxxx>
CC: "Andreas Färber" <afaerber@xxxxxxx>
CC: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
CC: Roman Kagan <rkagan@xxxxxxxxxxxxx>
CC: Denis V. Lunev <den@xxxxxxxxxx>
CC: kvm@xxxxxxxxxxxxxxx
---
 target-i386/cpu-qom.h |  1 +
 target-i386/cpu.c     |  1 +
 target-i386/cpu.h     |  5 ++++
 target-i386/kvm.c     | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 target-i386/machine.c | 37 ++++++++++++++++++++++++++++++
 5 files changed, 109 insertions(+), 1 deletion(-)

diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d..7ea5b34 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -94,6 +94,7 @@ typedef struct X86CPU {
     bool hyperv_reset;
     bool hyperv_vpindex;
     bool hyperv_runtime;
+    bool hyperv_synic;
     bool check_cpuid;
     bool enforce_cpuid;
     bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index e5f1c5b..1462e19 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -3142,6 +3142,7 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
     DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
     DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index fc4a605..8cf33df 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -918,6 +918,11 @@ typedef struct CPUX86State {
     uint64_t msr_hv_tsc;
     uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];

     /* exception/interrupt handling */
     int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2a9953b..3f2ea90 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -86,6 +86,7 @@ static bool has_msr_hv_crash;
 static bool has_msr_hv_reset;
 static bool has_msr_hv_vpindex;
 static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
 static bool has_msr_mtrr;
 static bool has_msr_xss;

@@ -521,7 +522,8 @@ static bool hyperv_enabled(X86CPU *cpu)
            cpu->hyperv_crash ||
            cpu->hyperv_reset ||
            cpu->hyperv_vpindex ||
-           cpu->hyperv_runtime);
+           cpu->hyperv_runtime ||
+           cpu->hyperv_synic);
 }

 static Error *invtsc_mig_blocker;
@@ -610,6 +612,21 @@ int kvm_arch_init_vcpu(CPUState *cs)
         if (cpu->hyperv_runtime && has_msr_hv_runtime) {
             c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
         }
+        if (cpu->hyperv_synic) {
+            int sint;
+
+            if (!has_msr_hv_synic ||
+                kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+                fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+                return -ENOSYS;
+            }
+
+            c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+            env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+            for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+                env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+            }
+        }
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
@@ -950,6 +967,10 @@ static int kvm_get_supported_msrs(KVMState *s)
                     has_msr_hv_runtime = true;
                     continue;
                 }
+                if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+                    has_msr_hv_synic = true;
+                    continue;
+                }
             }
         }

@@ -1511,6 +1532,23 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                               env->msr_hv_runtime);
         }
+        if (cpu->hyperv_synic) {
+            int j;
+
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+                              env->msr_hv_synic_control);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+                              env->msr_hv_synic_version);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+                              env->msr_hv_synic_evt_page);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+                              env->msr_hv_synic_msg_page);
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+                                  env->msr_hv_synic_sint[j]);
+            }
+        }
         if (has_msr_mtrr) {
             kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
             kvm_msr_entry_set(&msrs[n++],
@@ -1879,6 +1917,17 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_runtime) {
         msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
     }
+    if (cpu->hyperv_synic) {
+        uint32_t msr;
+
+        msrs[n++].index = HV_X64_MSR_SCONTROL;
+        msrs[n++].index = HV_X64_MSR_SVERSION;
+        msrs[n++].index = HV_X64_MSR_SIEFP;
+        msrs[n++].index = HV_X64_MSR_SIMP;
+        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+            msrs[n++].index = msr;
+        }
+    }
     if (has_msr_mtrr) {
         msrs[n++].index = MSR_MTRRdefType;
         msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -2035,6 +2084,21 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_VP_RUNTIME:
             env->msr_hv_runtime = msrs[i].data;
             break;
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = msrs[i].data;
+            break;
+        case HV_X64_MSR_SVERSION:
+            env->msr_hv_synic_version = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+            break;
         case MSR_MTRRdefType:
             env->mtrr_deftype = msrs[i].data;
             break;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a18e16e..d41e286 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -710,6 +710,42 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = {
     }
 };

+static bool hyperv_synic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_hv_synic_control != 0 ||
+        env->msr_hv_synic_evt_page != 0 ||
+        env->msr_hv_synic_msg_page != 0) {
+        return true;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+        if (env->msr_hv_synic_sint[i] != 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+    .name = "cpu/msr_hyperv_synic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_synic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+                             HV_SYNIC_SINT_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool avx512_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
@@ -893,6 +929,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_hyperv_time,
         &vmstate_msr_hyperv_crash,
         &vmstate_msr_hyperv_runtime,
+        &vmstate_msr_hyperv_synic,
         &vmstate_avx512,
         &vmstate_xss,
         NULL
-- 
2.4.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
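
The commit message above says vCPU creation is refused when the kernel lacks
SynIC support; the patch detects support via the HV_X64_MSR_SCONTROL entry in
the MSR list and kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0). For
reference, below is a minimal standalone sketch (not part of the patch, and
not needed by QEMU itself) of how userspace could probe a kernel for that
capability with KVM_CHECK_EXTENSION. The /dev/kvm path and printed messages
are illustrative, and the program only builds against kernel headers that
already define KVM_CAP_HYPERV_SYNIC (Linux 4.5+).

/* Standalone sketch: probe the running kernel for KVM_CAP_HYPERV_SYNIC.
 * Assumes <linux/kvm.h> defines the capability (Linux 4.5+ headers).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm_fd = open("/dev/kvm", O_RDWR);   /* system-wide KVM fd */
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    /* KVM_CHECK_EXTENSION returns a positive value if the capability is
     * known to this kernel, 0 otherwise. */
    int ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_SYNIC);
    printf("KVM_CAP_HYPERV_SYNIC: %s\n",
           ret > 0 ? "supported" : "not supported");

    close(kvm_fd);
    return 0;
}

On a kernel where this probe reports "not supported", starting QEMU with
'-cpu ...,hv-synic' is expected to fail with the patch's
"Hyper-V SynIC is not supported by kernel" message.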