The AVIC physical APIC ID table entry contains the host physical APIC ID
field, which the hardware uses to keep track of where each vCPU is running.
Originally, this field was an 8-bit value, which can only hold physical APIC
IDs up to 255. To support systems with larger APIC IDs, the AVIC hardware
extends the field so that it can hold the largest physical APIC ID possible
on the system. Therefore, replace the hard-coded mask value with a value
calculated from the maximum possible physical APIC ID in the system.

Reviewed-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
 arch/x86/kvm/svm/avic.c | 53 ++++++++++++++++++++++++++++++++++-------
 arch/x86/kvm/svm/svm.c  |  6 +++++
 arch/x86/kvm/svm/svm.h  |  2 +-
 3 files changed, 52 insertions(+), 9 deletions(-)
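
For reference (not part of the patch to apply), below is a minimal standalone
sketch of the core mask calculation: the maximum physical APIC ID is rounded
up to a power of two and the mask is that power of two minus one, mirroring
the get_count_order()/BIT() logic in the avic.c hunk below. It omits the
CPUID level-type and x2APIC checks that the kernel code performs, the
count_order() helper is my own stand-in for get_count_order(), and the
maximum ID of 511 is an arbitrary example value, not one read from hardware.

  #include <stdio.h>
  #include <stdint.h>

  /*
   * Smallest number of bits such that (1 << bits) >= count; for count >= 1
   * this matches the rounding done by the kernel's get_count_order().
   */
  static int count_order(unsigned int count)
  {
          int order = 0;

          while ((1u << order) < count)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned int max_apicid = 511;        /* hypothetical system maximum */
          int width = count_order(max_apicid);  /* 9 bits cover IDs 0..511 */
          uint64_t mask = (1ULL << width) - 1;  /* 0x1ff */

          printf("AVIC host physical APIC ID mask: %#llx\n",
                 (unsigned long long)mask);
          return 0;
  }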

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 8052d92069e0..0b073f63dabd 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -19,6 +19,7 @@
 #include <linux/amd-iommu.h>
 #include <linux/kvm_host.h>
 
+#include <asm/apic.h>
 #include <asm/irq_remapping.h>
 
 #include "trace.h"
@@ -63,6 +64,7 @@ static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
 static u32 next_vm_id = 0;
 static bool next_vm_id_wrapped = 0;
+static u64 avic_host_physical_id_mask;
 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
 
 /*
@@ -133,6 +135,46 @@ void avic_vm_destroy(struct kvm *kvm)
 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 }
 
+int avic_init_host_physical_apicid_mask(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+	u32 level_type, core_mask_width, max_phys_mask_width;
+
+	/*
+	 * Calculate minimum number of bits required to represent
+	 * host physical APIC ID for each processor (level type 2)
+	 * using CPUID leaf 0xb sub-leaf 0x1.
+	 */
+	cpuid_count(0xb, 0x1, &eax, &ebx, &ecx, &edx);
+	level_type = (ecx >> 8) & 0xff;
+
+	/*
+	 * If level-type 2 (i.e. processor type) is not available,
+	 * or the host is in xAPIC mode, default to an 8-bit mask.
+	 */
+	if (level_type != 2 || !x2apic_mode) {
+		avic_host_physical_id_mask = 0xffULL;
+		goto out;
+	}
+
+	core_mask_width = eax & 0xF;
+
+	max_phys_mask_width = get_count_order(apic_get_max_phys_apicid());
+
+	/*
+	 * Sanity check to ensure core_mask_width for a processor does not
+	 * exceed the calculated mask width.
+	 */
+	if (WARN_ON(core_mask_width > max_phys_mask_width))
+		return -EINVAL;
+
+	avic_host_physical_id_mask = BIT(max_phys_mask_width) - 1;
+out:
+	pr_debug("Using AVIC host physical APIC ID mask %#0llx\n",
+		 avic_host_physical_id_mask);
+	return 0;
+}
+
 int avic_vm_init(struct kvm *kvm)
 {
 	unsigned long flags;
@@ -943,22 +985,17 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	u64 entry;
-	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	/*
-	 * Since the host physical APIC id is 8 bits,
-	 * we can support host APIC ID upto 255.
-	 */
-	if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
+	if (WARN_ON(h_physical_id > avic_host_physical_id_mask))
 		return;
 
 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
 	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
 
-	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
-	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
+	entry &= ~avic_host_physical_id_mask;
+	entry |= h_physical_id;
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 	if (svm->avic_is_running)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 989685098b3e..0b066bb5149d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1031,6 +1031,12 @@ static __init int svm_hardware_setup(void)
 		nrips = false;
 	}
 
+	if (avic) {
+		r = avic_init_host_physical_apicid_mask();
+		if (r)
+			avic = false;
+	}
+
 	enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
 
 	if (enable_apicv) {
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5d30db599e10..e215092d0411 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -497,7 +497,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
 #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT		31
 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK	(1 << 31)
 
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
 #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
 #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK	(1ULL << 62)
 #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK	(1ULL << 63)
@@ -525,6 +524,7 @@ int avic_init_vcpu(struct vcpu_svm *svm);
 void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_post_state_restore(struct kvm_vcpu *vcpu);
+int avic_init_host_physical_apicid_mask(void);
 void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
 bool svm_check_apicv_inhibit_reasons(ulong bit);
-- 
2.25.1