On 08/01/18 20:08, Paolo Bonzini wrote:
> From: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
> 
> Ensure an IBPB (Indirect branch prediction barrier) before every VCPU
> switch.
> 
> Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx.c | 9 +++++++++
>  1 file changed, 9 insertions(+)
> 
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index d00bcad7336e..bf127c570675 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2375,6 +2375,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>  		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>  		vmcs_load(vmx->loaded_vmcs->vmcs);
> +		if (have_spec_ctrl)
> +			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  	}
>  
>  	if (!already_loaded) {
> @@ -4029,6 +4031,13 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
>  	free_vmcs(loaded_vmcs->vmcs);
>  	loaded_vmcs->vmcs = NULL;
>  	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
> +
> +	/*
> +	 * The VMCS could be recycled, causing a false negative in
> +	 * vmx_vcpu_load; block speculative execution.
> +	 */
> +	if (have_spec_ctrl)
> +		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
>  }
>  
>  static void vmx_nested_free_vmcs02(struct vcpu_vmx *vmx)
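For readers following the thread, here is a small stand-alone sketch of the
flow this patch sets up (my own illustration, not kernel code: wrmsrl(), the
constant values and the helper names are stubbed or simplified here): IBPB is
issued only when the per-CPU current VMCS actually changes in vmx_vcpu_load(),
and again in free_loaded_vmcs(), because a freed VMCS page can be reallocated
at the same address and defeat that pointer comparison.

/*
 * Stand-alone illustration only; wrmsrl() and the constants below are
 * stubs, not the kernel's definitions from <asm/msr.h>/<asm/msr-index.h>.
 */
#include <stdbool.h>
#include <stdio.h>

#define MSR_IA32_PRED_CMD  0x49          /* IA32_PRED_CMD */
#define FEATURE_SET_IBPB   (1ULL << 0)   /* IBPB command bit */

static bool have_spec_ctrl = true;       /* discovered via CPUID in the real code */
static void *current_vmcs;               /* per-CPU variable in the real code */

static void wrmsrl(unsigned int msr, unsigned long long val)
{
	printf("wrmsr 0x%x <- 0x%llx\n", msr, val);   /* stub for the MSR write */
}

/* Mirrors the vmx_vcpu_load() hunk: barrier only when the VMCS changes. */
static void vcpu_load(void *vmcs)
{
	if (current_vmcs != vmcs) {
		current_vmcs = vmcs;
		/* vmcs_load(vmcs) would run here */
		if (have_spec_ctrl)
			wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
	}
}

/*
 * Mirrors the free_loaded_vmcs() hunk: the freed page may be recycled at
 * the same address, so the pointer check above would wrongly skip the
 * barrier on the next load; issue IBPB here to cover that case.
 */
static void free_vmcs_page(void *vmcs)
{
	(void)vmcs;                      /* free_vmcs(vmcs) would run here */
	if (have_spec_ctrl)
		wrmsrl(MSR_IA32_PRED_CMD, FEATURE_SET_IBPB);
}

int main(void)
{
	char vmcs_a, vmcs_b;             /* stand-ins for two VMCS pages */

	vcpu_load(&vmcs_a);              /* VMCS changes -> IBPB */
	vcpu_load(&vmcs_a);              /* same VMCS    -> no barrier */
	vcpu_load(&vmcs_b);              /* VMCS changes -> IBPB */
	free_vmcs_page(&vmcs_b);         /* page may be recycled -> IBPB */
	return 0;
}

With that in mind: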
Reviewed-by: Liran Alon <liran.alon@xxxxxxxxxx>