On 30/03/19 12:20, Borislav Petkov wrote:
> @@ -2252,7 +2252,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
>  		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
> 
> -	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
> +	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
>  		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
>  		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
>  			__this_cpu_write(current_tsc_ratio, tsc_ratio);
> @@ -2260,7 +2260,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  		}
>  	}
>  	/* This assumes that the kernel never uses MSR_TSC_AUX */
> -	if (static_cpu_has(X86_FEATURE_RDTSCP))
> +	if (boot_cpu_has(X86_FEATURE_RDTSCP))
>  		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
> 
>  	if (sd->current_vmcb != svm->vmcb) {
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index c73375e01ab8..0cb0d26564ca 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -6423,7 +6423,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
>  		vmx_set_interrupt_shadow(vcpu, 0);
> 
> -	if (static_cpu_has(X86_FEATURE_PKU) &&
> +	if (boot_cpu_has(X86_FEATURE_PKU) &&
>  	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
>  	    vcpu->arch.pkru != vmx->host_pkru)
>  		__write_pkru(vcpu->arch.pkru);
> @@ -6512,7 +6512,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
>  	 * back on host, so it is safe to read guest PKRU from current
>  	 * XSAVE.
>  	 */
> -	if (static_cpu_has(X86_FEATURE_PKU) &&
> +	if (boot_cpu_has(X86_FEATURE_PKU) &&
>  	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
>  		vcpu->arch.pkru = __read_pkru();

These are not slow paths.

Paolo
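
Paolo's objection rests on the difference between the two macros: boot_cpu_has() tests a bit in boot_cpu_data's capability bitmap at run time on every call, while static_cpu_has() is patched by the alternatives framework into a direct branch at boot, which is why it is the cheaper choice in hot paths such as svm_vcpu_load() and vmx_vcpu_run() above. The minimal userspace sketch below only mimics that distinction; feature_bitmap_has(), patched_pku and boot_time_setup() are invented names for illustration, not kernel APIs.

/*
 * Hedged userspace sketch -- not the kernel's macros.  It mimics the
 * distinction at issue: a per-call bitmap test (boot_cpu_has()-style)
 * versus a check resolved once at "boot" (static_cpu_has()-style,
 * which the kernel implements with alternatives patching).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_PKU 9	/* arbitrary bit number for the demo */

/* stands in for boot_cpu_data's capability bitmap */
static uint64_t feature_bitmap;

/* boot_cpu_has()-style: a memory load and a bit test on every call */
static bool feature_bitmap_has(int bit)
{
	return feature_bitmap & (1ULL << bit);
}

/*
 * static_cpu_has()-style: the real kernel patches the branch into a
 * direct jmp/nop via alternatives at boot; here the result is simply
 * resolved once and kept in a flag.
 */
static bool patched_pku;

static void boot_time_setup(bool cpu_has_pku)
{
	if (cpu_has_pku)
		feature_bitmap |= 1ULL << FEATURE_PKU;
	patched_pku = cpu_has_pku;
}

int main(void)
{
	long bitmap_hits = 0, patched_hits = 0;

	boot_time_setup(true);

	/*
	 * vcpu_run()-like hot loop: the first check re-reads the bitmap
	 * on every iteration, the second one does not need to.
	 */
	for (int i = 0; i < 1000000; i++) {
		if (feature_bitmap_has(FEATURE_PKU))
			bitmap_hits++;
		if (patched_pku)
			patched_hits++;
	}

	printf("bitmap_hits=%ld patched_hits=%ld\n",
	       bitmap_hits, patched_hits);
	return 0;
}

With optimizations enabled a compiler may hoist the bitmap load out of this particular loop on its own; the alternatives machinery gives static_cpu_has() that guarantee unconditionally, since after boot patching the feature check costs no memory access at all.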