From: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>

"Some AMD processors only support a non-architectural means of enabling
speculative store bypass disable (SSBD). To allow a simplified view of
this to a guest, an architectural definition has been created through a
new CPUID bit, 0x80000008_EBX[25], and a new MSR, 0xc001011f. With this,
a hypervisor can virtualize the existence of this definition and provide
an architectural method for using SSBD to a guest.

Add the new CPUID feature, the new MSR and update the existing SSBD
support to use this MSR when present." (from x86/speculation: Add
virtualized speculative store bypass disable support in Linux)

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Reviewed-by: Daniel P. Berrangé <berrange@xxxxxxxxxx>
Signed-off-by: Daniel P. Berrangé <berrange@xxxxxxxxxx>
---
 target/i386/cpu.h     |  2 ++
 target/i386/kvm.c     | 16 ++++++++++++++--
 target/i386/machine.c | 20 ++++++++++++++++++++
 3 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index f0b68905de..8ac13f6c2c 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -351,6 +351,7 @@ typedef enum X86Seg {
 #define MSR_IA32_FEATURE_CONTROL        0x0000003a
 #define MSR_TSC_ADJUST                  0x0000003b
 #define MSR_IA32_SPEC_CTRL              0x48
+#define MSR_VIRT_SSBD                   0xc001011f
 #define MSR_IA32_TSCDEADLINE            0x6e0
 
 #define FEATURE_CONTROL_LOCKED                    (1<<0)
@@ -1210,6 +1211,7 @@ typedef struct CPUX86State {
     uint32_t pkru;
 
     uint64_t spec_ctrl;
+    uint64_t virt_ssbd;
 
     /* End of state preserved by INIT (dummy marker).  */
     struct {} end_init_save;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index d6666a4b19..0c656a91a4 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -93,6 +93,7 @@ static bool has_msr_hv_frequencies;
 static bool has_msr_hv_reenlightenment;
 static bool has_msr_xss;
 static bool has_msr_spec_ctrl;
+static bool has_msr_virt_ssbd;
 static bool has_msr_smi_count;
 static uint32_t has_architectural_pmu_version;
 
@@ -1233,6 +1234,9 @@ static int kvm_get_supported_msrs(KVMState *s)
             case MSR_IA32_SPEC_CTRL:
                 has_msr_spec_ctrl = true;
                 break;
+            case MSR_VIRT_SSBD:
+                has_msr_virt_ssbd = true;
+                break;
             }
         }
     }
@@ -1721,6 +1725,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
     }
+    if (has_msr_virt_ssbd) {
+        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd);
+    }
+
 #ifdef TARGET_X86_64
     if (lm_capable_kernel) {
         kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
@@ -2100,8 +2108,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
     }
-
-
+    if (has_msr_virt_ssbd) {
+        kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0);
+    }
     if (!env->tsc_valid) {
         kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
         env->tsc_valid = !runstate_is_running();
@@ -2481,6 +2490,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_SPEC_CTRL:
             env->spec_ctrl = msrs[i].data;
             break;
+        case MSR_VIRT_SSBD:
+            env->virt_ssbd = msrs[i].data;
+            break;
         case MSR_IA32_RTIT_CTL:
             env->msr_rtit_ctrl = msrs[i].data;
             break;
diff --git a/target/i386/machine.c b/target/i386/machine.c
index fd99c0bbb4..4d98d367c1 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -916,6 +916,25 @@ static const VMStateDescription vmstate_msr_intel_pt = {
     }
 };
 
+static bool virt_ssbd_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->virt_ssbd != 0;
+}
+
+static const VMStateDescription vmstate_msr_virt_ssbd = {
+    .name = "cpu/virt_ssbd",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = virt_ssbd_needed,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -1039,6 +1058,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_spec_ctrl,
         &vmstate_mcg_ext_ctl,
         &vmstate_msr_intel_pt,
+        &vmstate_msr_virt_ssbd,
         NULL
     }
 };
-- 
2.17.0
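
[Editor's note, not part of the patch: a minimal guest-side sketch of the
interface this patch exposes, assuming the hypothetical helper names below
and that the SSBD control lives in bit 2 of MSR 0xc001011f, mirroring
IA32_SPEC_CTRL.SSBD as in the Linux commit quoted above. It only
illustrates the detect-then-write flow; the check of the maximum extended
CPUID leaf is omitted for brevity, and WRMSR of course requires CPL0.]

#include <stdbool.h>
#include <stdint.h>

#define CPUID_8000_0008_EBX_VIRT_SSBD  (1u << 25)   /* "virt-ssbd" feature bit */
#define MSR_VIRT_SSBD                  0xc001011f
#define VIRT_SSBD_SSBD                 (1ull << 2)  /* assumed: mirrors SPEC_CTRL.SSBD */

static inline void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                         uint32_t *c, uint32_t *d)
{
    asm volatile("cpuid"
                 : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                 : "a"(leaf), "c"(0));
}

static inline void wrmsr(uint32_t msr, uint64_t val)
{
    asm volatile("wrmsr"
                 : /* no outputs */
                 : "c"(msr), "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

/* Enable SSBD through the virtualized MSR if the hypervisor exposes it. */
static bool enable_virt_ssbd(void)
{
    uint32_t a, b, c, d;

    cpuid(0x80000008, &a, &b, &c, &d);
    if (!(b & CPUID_8000_0008_EBX_VIRT_SSBD)) {
        return false;   /* interface not advertised to this guest */
    }
    wrmsr(MSR_VIRT_SSBD, VIRT_SSBD_SSBD);
    return true;
}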