4.17-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Nicolai Stange <nstange@xxxxxxx>

commit 5b6ccc6c3b1a477fbac9ec97a0b4c1c48e765209 upstream

Currently, vmx_vcpu_run() checks if l1tf_flush_l1d is set and invokes
vmx_l1d_flush() if so.

This test is unnecessary for the "always flush L1D" mode.

Move the check to vmx_l1d_flush()'s conditional mode code path.

Notes:
- vmx_l1d_flush() is likely to get inlined anyway and thus, there's
  no extra function call.

- This inverts the (static) branch prediction, but there hadn't been
  any explicit likely()/unlikely() annotations before and so it stays
  as is.

Signed-off-by: Nicolai Stange <nstange@xxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/kvm/vmx.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9463,12 +9463,16 @@ static void vmx_l1d_flush(struct kvm_vcp
 	 * 'always'
 	 */
 	if (static_branch_likely(&vmx_l1d_flush_cond)) {
+		bool flush_l1d = vcpu->arch.l1tf_flush_l1d;
+
 		/*
 		 * Clear the flush bit, it gets set again either from
 		 * vcpu_run() or from one of the unsafe VMEXIT
 		 * handlers.
 		 */
 		vcpu->arch.l1tf_flush_l1d = false;
+		if (!flush_l1d)
+			return;
 	}
 
 	vcpu->stat.l1d_flush++;
@@ -10003,10 +10007,8 @@ static void __noclone vmx_vcpu_run(struc
 	evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
 		(unsigned long)&current_evmcs->host_rsp : 0;
 
-	if (static_branch_unlikely(&vmx_l1d_should_flush)) {
-		if (vcpu->arch.l1tf_flush_l1d)
-			vmx_l1d_flush(vcpu);
-	}
+	if (static_branch_unlikely(&vmx_l1d_should_flush))
+		vmx_l1d_flush(vcpu);
 
 	asm(
 		/* Store host registers */
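
For reviewers: the net effect of the two hunks is that the per-vCPU
l1tf_flush_l1d test now lives inside vmx_l1d_flush()'s conditional-mode
path, so "always" mode falls straight through to the flush. Below is a
minimal standalone model of that control flow, not kernel code; the
struct, function, and main() names are placeholders for illustration,
with vmx_l1d_flush_cond standing in for the static branch.

	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholder for struct kvm_vcpu's relevant state. */
	struct vcpu_model {
		bool l1tf_flush_l1d;		/* re-armed by vcpu_run() /
						   unsafe VMEXIT handlers */
		unsigned long l1d_flush_count;	/* models vcpu->stat.l1d_flush */
	};

	/* Models static_branch_likely(&vmx_l1d_flush_cond):
	   true = "cond" mode, false = "always" mode. */
	static bool vmx_l1d_flush_cond = true;

	static void model_l1d_flush(struct vcpu_model *vcpu)
	{
		if (vmx_l1d_flush_cond) {
			bool flush_l1d = vcpu->l1tf_flush_l1d;

			/* Clear the flag; it gets set again before the
			   next entry, as in the patched kernel code. */
			vcpu->l1tf_flush_l1d = false;
			if (!flush_l1d)
				return;	/* cond mode: no flush needed */
		}
		/* "always" mode reaches this point unconditionally. */
		vcpu->l1d_flush_count++;
	}

	int main(void)
	{
		struct vcpu_model vcpu = { .l1tf_flush_l1d = true };

		model_l1d_flush(&vcpu);	/* flag set: flush, then clear */
		model_l1d_flush(&vcpu);	/* flag clear: early return */
		printf("flushes performed: %lu\n", vcpu.l1d_flush_count);
		return 0;	/* prints 1 */
	}

With the check folded in this way, vmx_vcpu_run() only tests the
vmx_l1d_should_flush static branch, matching the simplified caller in
the second hunk.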