On Fri, Feb 14, 2020 at 05:44:41PM +0800, Xiaoyao Li wrote:
> On 2/2/2020 2:51 AM, Sean Christopherson wrote:
> >@@ -387,7 +388,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
> >  		entry->ebx |= F(TSC_ADJUST);
> >  		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
> >-		f_la57 = entry->ecx & F(LA57);
> >+		f_la57 = cpuid_entry_get(entry, X86_FEATURE_LA57);

Note, cpuid_entry_get() is used here.

> >  		cpuid_mask(&entry->ecx, CPUID_7_ECX);
> >  		/* Set LA57 based on hardware capability. */
> >  		entry->ecx |= f_la57;
> >diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> >index 72a79bdfed6b..64e96e4086e2 100644
> >--- a/arch/x86/kvm/cpuid.h
> >+++ b/arch/x86/kvm/cpuid.h
> >@@ -95,16 +95,10 @@ static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
> >  	return reverse_cpuid[x86_leaf];
> >  }
> >-static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
> >+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
> >+						  const struct cpuid_reg *cpuid)
> >  {
> >-	struct kvm_cpuid_entry2 *entry;
> >-	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
> >-
> >-	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
> >-	if (!entry)
> >-		return NULL;
> >-
> >-	switch (cpuid.reg) {
> >+	switch (cpuid->reg) {
> >  	case CPUID_EAX:
> >  		return &entry->eax;
> >  	case CPUID_EBX:
> >@@ -119,6 +113,40 @@ static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsi
> >  	}
> >  }
> >+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
> >+						unsigned x86_feature)
> >+{
> >+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
> >+
> >+	return __cpuid_entry_get_reg(entry, &cpuid);
> >+}
> >+
> >+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
> >+					   unsigned x86_feature)
> >+{
> >+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
> >+
> >+	return *reg & __feature_bit(x86_feature);
> >+}
> >+
> 
> This helper function is unnecessary. There is only one user throughout
> this series, i.e., cpuid_entry_has() below.

And the LA57 case above.

> And I cannot imagine other possible use cases for it.

The LA57 case, which admittedly goes away soon, was subtle enough (OR in the
flag instead of querying yes/no) that I wanted to keep the accessor around in
case a similar case popped up in the future.

> >+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
> >+					    unsigned x86_feature)
> >+{
> >+	return cpuid_entry_get(entry, x86_feature);
> >+}
> >+
> >+static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
>                            ^
> Should be u32,
> otherwise the previous patch will be unhappy. :)

Doh, thanks!

> >+{
> >+	struct kvm_cpuid_entry2 *entry;
> >+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
> >+
> >+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
> >+	if (!entry)
> >+		return NULL;
> >+
> >+	return __cpuid_entry_get_reg(entry, &cpuid);
> >+}
> >+
> >  static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
> >  {
> >  	u32 *reg;
> >
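
To make the "OR in the flag instead of querying yes/no" subtlety concrete,
here's a minimal standalone sketch (hypothetical names, plain userspace C
rather than the actual kernel helpers) of why a raw accessor that preserves
the bit's position is useful alongside the boolean one:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_LA57	(1u << 16)	/* CPUID.7.0:ECX bit 16 */

/* Raw accessor: returns 0 or the feature bit in its native position. */
static uint32_t entry_get(uint32_t ecx, uint32_t feature_bit)
{
	return ecx & feature_bit;
}

/* Boolean accessor: only answers "is the feature set?". */
static bool entry_has(uint32_t ecx, uint32_t feature_bit)
{
	return entry_get(ecx, feature_bit);
}

int main(void)
{
	uint32_t ecx = F_LA57 | 0xff;	/* pretend hardware CPUID value */
	uint32_t mask = 0xff;		/* pretend supported-features mask lacks LA57 */
	uint32_t f_la57;

	f_la57 = entry_get(ecx, F_LA57);	/* stash the bit itself */
	ecx &= mask;				/* masking clears LA57... */
	ecx |= f_la57;				/* ...the raw accessor lets us restore it */

	printf("LA57 %s\n", entry_has(ecx, F_LA57) ? "set" : "clear");
	return 0;
}

entry_has() could have been used for the stash as well, but then the caller
would have to rebuild the bit (feature ? F_LA57 : 0) before ORing it back in,
which is exactly the noise the raw accessor avoids.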