> On Jun 1, 2023, at 9:05 PM, Sean Christopherson <seanjc@xxxxxxxxxx> wrote:
>
> Replace an #ifdef on CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS with a
> cpu_feature_enabled() check on X86_FEATURE_PKU. The macro magic of
> DISABLED_MASK_BIT_SET() means that cpu_feature_enabled() provides the
> same end result (no code generated) when PKU is disabled by Kconfig.
>
> No functional change intended.
>
> Cc: Jon Kohler <jon@xxxxxxxxxxx>
> Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
> ---
>  arch/x86/kvm/x86.c | 8 ++------
>  1 file changed, 2 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index ceb7c5e9cf9e..eed1f0629023 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -1017,13 +1017,11 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
>  			wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
>  	}
>
> -#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
> -	if (static_cpu_has(X86_FEATURE_PKU) &&
> +	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
>  	    vcpu->arch.pkru != vcpu->arch.host_pkru &&
>  	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
>  	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
>  		write_pkru(vcpu->arch.pkru);
> -#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
>  }
>  EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
>
> @@ -1032,15 +1030,13 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
>  	if (vcpu->arch.guest_state_protected)
>  		return;
>
> -#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
> -	if (static_cpu_has(X86_FEATURE_PKU) &&
> +	if (cpu_feature_enabled(X86_FEATURE_PKU) &&
>  	    ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
>  	     kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
>  		vcpu->arch.pkru = rdpkru();
>  		if (vcpu->arch.pkru != vcpu->arch.host_pkru)
>  			write_pkru(vcpu->arch.host_pkru);
>  	}
> -#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
>
>  	if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
>
>
> base-commit: a053a0e4a9f8c52f3acf8a9d2520c4bf39077a7e
> --
> 2.41.0.rc2.161.g9c6817b8e7-goog
>

Thanks for the cleanup!

Reviewed-by: Jon Kohler <jon@xxxxxxxxxxx>
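
As a small aside for anyone who hasn't chased the cpu_feature_enabled()
plumbing before: the reason the #ifdef becomes redundant is that a
Kconfig-disabled feature bit is folded into a build-time "disabled" mask, so
the check collapses to a compile-time constant 0 and the compiler drops the
guarded block, just as the preprocessor did. Below is a rough, self-contained
sketch of that pattern; the demo_* names and the DEMO_CONFIG_PKU switch are
made up for illustration and are not the real macros from
<asm/cpufeature.h> / <asm/disabled-features.h>.

/* Simplified illustration only, not the kernel implementation. */
#include <stdio.h>

#define FEATURE_PKU	9	/* arbitrary bit number for the sketch */

/* In the kernel, the disabled mask is derived from CONFIG_* options. */
#ifdef DEMO_CONFIG_PKU
# define DEMO_DISABLED_MASK	0ULL
#else
# define DEMO_DISABLED_MASK	(1ULL << FEATURE_PKU)
#endif

/* Stand-in for the runtime CPUID-backed check (static_cpu_has() upstream). */
static int demo_runtime_cpu_has(int bit)
{
	(void)bit;
	return 1;
}

/*
 * Stand-in for cpu_feature_enabled(): a constant bit that sits in the
 * disabled mask turns the whole expression into the constant 0, and
 * dead-code elimination removes everything guarded by it.
 */
#define demo_feature_enabled(bit)					\
	(__builtin_constant_p(bit) &&					\
	 (DEMO_DISABLED_MASK & (1ULL << (bit))) ? 0 : demo_runtime_cpu_has(bit))

int main(void)
{
	if (demo_feature_enabled(FEATURE_PKU))
		puts("PKU path compiled in and taken");
	else
		puts("PKU path compiled out (or unsupported at runtime)");
	return 0;
}

Building that with and without -DDEMO_CONFIG_PKU shows the two outcomes; in
the real code the runtime fallback is static_cpu_has(), which is patched via
alternatives rather than called as a plain function, but the Kconfig folding
is the part that lets the #ifdef go away.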