On Mon, May 04, 2020 at 03:12:36PM +0200, Paolo Bonzini wrote:
> On 04/05/20 14:01, Alexander Graf wrote:
> > I like the WARN_ON :). It should be almost free during execution, but
> > helps us catch problems early.
>
> Yes, it's nice.  I didn't mind the "buddy" argument either, but if we're
> going to get a bool I prefer positive logic so I'd like to squash this:

I don't love need_ibpb as a param name; it doesn't provide any information
as to why the IBPB is needed.  But, I can't come up with anything better
that isn't absurdly long because e.g. "different_guest" isn't necessarily
true in the vmx_vcpu_load() path.

What about going the @buddy route and adding the comment and WARN in
vmx_vcpu_load_vmcs()?  E.g.

	prev = per_cpu(current_vmcs, cpu);
	if (prev != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);

		/*
		 * No indirect branch prediction barrier needed when switching
		 * the active VMCS within a guest, e.g. on nested VM-Enter.
		 * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
		 */
		if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
			indirect_branch_prediction_barrier();
	}

> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index b57420f3dd8f..299393750a18 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -304,7 +304,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
>  	prev = vmx->loaded_vmcs;
>  	WARN_ON_ONCE(prev->cpu != cpu || prev->vmcs != per_cpu(current_vmcs, cpu));
>  	vmx->loaded_vmcs = vmcs;
> -	vmx_vcpu_load_vmcs(vcpu, cpu, true);
> +
> +	/*
> +	 * This is the same guest from our point of view, so no
> +	 * indirect branch prediction barrier is needed.  The L1
> +	 * guest can protect itself with retpolines, IBPB or IBRS.
> +	 */
> +	vmx_vcpu_load_vmcs(vcpu, cpu, false);
>  	vmx_sync_vmcs_host_state(vmx, prev);
>  	put_cpu();
>
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 669e14947ba9..0f9c8d2dd7f6 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -1311,7 +1311,7 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
>  		pi_set_on(pi_desc);
>  }
>
> -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch)
> +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool need_ibpb)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
> @@ -1336,7 +1336,7 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch)
>  	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
>  		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
>  		vmcs_load(vmx->loaded_vmcs->vmcs);
> -		if (!nested_switch)
> +		if (need_ibpb)
>  			indirect_branch_prediction_barrier();
>  	}
>
> @@ -1378,7 +1378,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  {
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>
> -	vmx_vcpu_load_vmcs(vcpu, cpu, false);
> +	vmx_vcpu_load_vmcs(vcpu, cpu, true);
>
>  	vmx_vcpu_pi_load(vcpu, cpu);
>
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index fa61dc802183..e584ee9b3e94 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -320,7 +320,7 @@ struct kvm_vmx {
>  };
>
>  bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
> -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool nested_switch);
> +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, bool need_ibpb);
>  void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>  int allocate_vpid(void);
>  void free_vpid(int vpid);
>
> Paolo
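
For completeness, a sketch of what the call sites would look like under the
@buddy approach (untested; the NULL-means-no-buddy convention is my
assumption, not something from Paolo's diff):

	/* vmx.h: @buddy replaces the bool in the prototype. */
	void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
				struct loaded_vmcs *buddy);

	/*
	 * nested.c: vmx_switch_vmcs() is swapping vmcs01<->vmcs02 within
	 * the same guest, so pass the outgoing loaded_vmcs as the buddy;
	 * the WARN_ON_ONCE then verifies it matches per_cpu(current_vmcs)
	 * and the IBPB is skipped.
	 */
	vmx_vcpu_load_vmcs(vcpu, cpu, prev);

	/*
	 * vmx.c: vmx_vcpu_load() can't guarantee the previous VMCS on
	 * this pCPU belonged to the same guest, so pass no buddy and
	 * emit the IBPB whenever the active VMCS changes.
	 */
	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);

That keeps the "why" at the one place that actually does the barrier,
instead of making every caller encode it in a bool.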