Now that KVM sets vcpu->arch.cpuid_{entries,nent} before processing the
incoming CPUID entries during KVM_SET_CPUID{,2}, drop the @entries and
@nent params from cpuid_entry2_find() and unconditionally operate on the
vCPU state.

No functional change intended.

Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/cpuid.c | 62 +++++++++++++++-----------------------------
 1 file changed, 21 insertions(+), 41 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index b402b9f59cbb..af5c66408c78 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -70,8 +70,8 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted)
  */
 #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
 
-static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
-	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
+static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu,
+						  u32 function, u64 index)
 {
 	struct kvm_cpuid_entry2 *e;
 	int i;
@@ -88,8 +88,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	 */
 	lockdep_assert_irqs_enabled();
 
-	for (i = 0; i < nent; i++) {
-		e = &entries[i];
+	for (i = 0; i < vcpu->arch.cpuid_nent; i++) {
+		e = &vcpu->arch.cpuid_entries[i];
 
 		if (e->function != function)
 			continue;
@@ -123,8 +123,6 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 
 static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpuid_entry2 *entries = vcpu->arch.cpuid_entries;
-	int nent = vcpu->arch.cpuid_nent;
 	struct kvm_cpuid_entry2 *best;
 	u64 xfeatures;
 
@@ -132,7 +130,7 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 	 * The existing code assumes virtual address is 48-bit or 57-bit in the
 	 * canonical address checks; exit if it is ever changed.
 	 */
-	best = cpuid_entry2_find(entries, nent, 0x80000008,
+	best = cpuid_entry2_find(vcpu, 0x80000008,
 				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	if (best) {
 		int vaddr_bits = (best->eax & 0xff00) >> 8;
@@ -145,7 +143,7 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 	 * Exposing dynamic xfeatures to the guest requires additional
 	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
 	 */
-	best = cpuid_entry2_find(entries, nent, 0xd, 0);
+	best = cpuid_entry2_find(vcpu, 0xd, 0);
 	if (!best)
 		return 0;
 
@@ -191,15 +189,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
 	return 0;
 }
 
-static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
-							      int nent, const char *sig)
+static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
+							    const char *sig)
 {
 	struct kvm_hypervisor_cpuid cpuid = {};
 	struct kvm_cpuid_entry2 *entry;
 	u32 base;
 
 	for_each_possible_hypervisor_cpuid_base(base) {
-		entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+		entry = cpuid_entry2_find(vcpu, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 
 		if (entry) {
 			u32 signature[3];
@@ -219,13 +217,6 @@ static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_e
 	return cpuid;
 }
 
-static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
-							     const char *sig)
-{
-	return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
-					  vcpu->arch.cpuid_nent, sig);
-}
-
 static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu)
 {
 	struct kvm_hypervisor_cpuid kvm_cpuid;
@@ -249,23 +240,22 @@ static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu)
  * Calculate guest's supported XCR0 taking into account guest CPUID data and
  * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
  */
-static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
+static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
-	best = cpuid_entry2_find(entries, nent, 0xd, 0);
+	best = cpuid_entry2_find(vcpu, 0xd, 0);
 	if (!best)
 		return 0;
 
 	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
 }
 
-static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
-				       int nent)
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
-	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+	best = cpuid_entry2_find(vcpu, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	if (best) {
 		/* Update OSXSAVE bit */
 		if (boot_cpu_has(X86_FEATURE_XSAVE))
@@ -276,43 +266,36 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 			   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
 	}
 
-	best = cpuid_entry2_find(entries, nent, 7, 0);
+	best = cpuid_entry2_find(vcpu, 7, 0);
 	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
 		cpuid_entry_change(best, X86_FEATURE_OSPKE,
 				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));
 
-	best = cpuid_entry2_find(entries, nent, 0xD, 0);
+	best = cpuid_entry2_find(vcpu, 0xD, 0);
 	if (best)
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
 
-	best = cpuid_entry2_find(entries, nent, 0xD, 1);
+	best = cpuid_entry2_find(vcpu, 0xD, 1);
 	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
 		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
-		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+		best = cpuid_entry2_find(vcpu, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 		if (best)
 			cpuid_entry_change(best, X86_FEATURE_MWAIT,
 					   vcpu->arch.ia32_misc_enable_msr &
 					   MSR_IA32_MISC_ENABLE_MWAIT);
 	}
 }
-
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
-{
-	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
-}
 EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
 
 static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_KVM_HYPERV
-	struct kvm_cpuid_entry2 *entries = vcpu->arch.cpuid_entries;
-	int nent = vcpu->arch.cpuid_nent;
 	struct kvm_cpuid_entry2 *entry;
 
-	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
+	entry = cpuid_entry2_find(vcpu, HYPERV_CPUID_INTERFACE,
 				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
 #else
@@ -370,8 +353,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 		kvm_apic_set_version(vcpu);
 	}
 
-	vcpu->arch.guest_supported_xcr0 =
-		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+	vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu);
 
 	vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);
 
@@ -1756,16 +1738,14 @@ int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
 						    u32 function, u32 index)
 {
-	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
-				 function, index);
+	return cpuid_entry2_find(vcpu, function, index);
 }
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);
 
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function)
 {
-	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
-				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+	return cpuid_entry2_find(vcpu, function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 }
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
-- 
2.47.0.338.g60cca15819-goog
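
Not part of the patch: a minimal sketch of the ordering assumption the
changelog relies on, namely that KVM_SET_CPUID{,2} installs the incoming
array on the vCPU before any of the helpers above run, so lookups can go
through @vcpu alone.  The helper name is hypothetical and the real
kvm_set_cpuid() path has additional validation and error handling
(including restoring the previous entries on failure), all omitted here.

/* Illustrative only; not the actual kvm_set_cpuid() implementation. */
static int example_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid_entry2 *e2, int nent)
{
	/* Install the incoming entries on the vCPU up front... */
	swap(vcpu->arch.cpuid_entries, e2);
	swap(vcpu->arch.cpuid_nent, nent);

	/*
	 * ...so that validation and runtime fixups can use the vCPU-based
	 * cpuid_entry2_find(vcpu, function, index) lookups.
	 */
	return kvm_check_cpuid(vcpu);
}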