x2APIC supports up to 2^32-1 LAPICs, but most guests in the coming years
will have slightly fewer VCPUs.  Sizing the map dynamically saves memory
at the cost of turning one constant into a variable.

The apic_map mutex now has to be taken before the allocation to avoid
races with CPU hotplug.

Signed-off-by: Radim Krčmář <rkrcmar@xxxxxxxxxx>
---
v1: use round_up [Peter]

 arch/x86/include/asm/kvm_host.h |  9 +++--
 arch/x86/kvm/lapic.c            | 86 ++++++++++++++++++++++-------------------
 arch/x86/kvm/lapic.h            |  2 +-
 3 files changed, 53 insertions(+), 44 deletions(-)
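A stand-alone sketch of the sizing rule, for illustration only (plain C,
user space; struct apic_map, alloc_map and round_up_16 below are
simplified stand-ins, not the kernel's kvm_apic_map, kzalloc or round_up):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct lapic;                            /* opaque stand-in for struct kvm_lapic */

struct apic_map {
        uint32_t size;                   /* number of phys_map[] entries */
        struct lapic *phys_map[];        /* flexible array, like kvm_apic_map */
};

/* round_up(x, 16); 16 is a power of two, so mask arithmetic suffices */
static uint32_t round_up_16(uint32_t x)
{
        return (x + 15) & ~15u;
}

static struct apic_map *alloc_map(uint32_t max_id)
{
        /* the logical lookup walks 16-entry x2APIC clusters,
         * so the array length must be a multiple of 16 */
        uint32_t size = round_up_16(max_id + 1);
        struct apic_map *map = calloc(1, sizeof(*map) +
                                         sizeof(struct lapic *) * size);

        if (map)
                map->size = size;
        return map;
}

int main(void)
{
        struct apic_map *m = alloc_map(287);     /* highest APIC ID in use */

        if (m) {
                printf("size = %u\n", (unsigned)m->size);  /* 288, not a fixed 256 */
                free(m);
        }
        return 0;
}

In the patch itself max_id starts at 255, so a guest that never uses
x2APIC IDs above 255 keeps a 256-entry table; only bigger guests grow it.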
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 53d39771842b..459a789cb3da 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -682,9 +682,12 @@ struct kvm_arch_memory_slot {
 struct kvm_apic_map {
         struct rcu_head rcu;
         u8 mode;
-        struct kvm_lapic *phys_map[256];
-        /* first index is cluster id second is cpu id in a cluster */
-        struct kvm_lapic *logical_map[16][16];
+        u32 size;
+        union {
+                struct kvm_lapic *xapic_flat_map[8];
+                struct kvm_lapic *xapic_cluster_map[16][4];
+        };
+        struct kvm_lapic *phys_map[];
 };
 
 /* Hyper-V emulation context */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 238b87b068db..eed5af46e619 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -115,26 +115,31 @@ static inline int apic_enabled(struct kvm_lapic *apic)
         (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
          APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-/* The logical map is definitely wrong if we have multiple
- * modes at the same time. (Physical map is always right.)
- */
-static inline bool kvm_apic_logical_map_valid(struct kvm_apic_map *map)
-{
-        return !(map->mode & (map->mode - 1));
-}
+static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
+                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
+        switch (map->mode) {
+        case KVM_APIC_MODE_X2APIC: {
+                u32 offset = (dest_id >> 16) * 16;
+                if (offset < map->size) {
+                        *cluster = &map->phys_map[offset];
+                        *mask = dest_id & 0xffff;
+                } else
+                        *mask = 0;
 
-static inline void
-apic_logical_id(struct kvm_apic_map *map, u32 dest_id, u16 *cid, u16 *lid)
-{
-        unsigned lid_bits;
-
-        BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_CLUSTER != 4);
-        BUILD_BUG_ON(KVM_APIC_MODE_XAPIC_FLAT != 8);
-        BUILD_BUG_ON(KVM_APIC_MODE_X2APIC != 16);
-        lid_bits = map->mode;
-
-        *cid = dest_id >> lid_bits;
-        *lid = dest_id & ((1 << lid_bits) - 1);
+                return true;
+                }
+        case KVM_APIC_MODE_XAPIC_FLAT:
+                *cluster = map->xapic_flat_map;
+                *mask = dest_id & 0xff;
+                return true;
+        case KVM_APIC_MODE_XAPIC_CLUSTER:
+                *cluster = map->xapic_cluster_map[dest_id >> 4];
+                *mask = dest_id & 0xf;
+                return true;
+        default:
+                /* Not optimized. */
+                return false;
+        }
 }
 
 static void recalculate_apic_map(struct kvm *kvm)
@@ -142,17 +147,28 @@ static void recalculate_apic_map(struct kvm *kvm)
         struct kvm_apic_map *new, *old = NULL;
         struct kvm_vcpu *vcpu;
         int i;
-
-        new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL);
+        u32 size, max_id = 255;
 
         mutex_lock(&kvm->arch.apic_map_lock);
 
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                if (kvm_apic_present(vcpu))
+                        max_id = max(max_id, kvm_apic_id(vcpu->arch.apic));
+
+        /* kvm_apic_map_get_logical_dest() expects multiples of 16 */
+        size = round_up(max_id + 1, 16);
+        new = kzalloc(sizeof(struct kvm_apic_map) +
+                      sizeof(struct kvm_lapic *) * size, GFP_KERNEL);
+
         if (!new)
                 goto out;
 
+        new->size = size;
+
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 struct kvm_lapic *apic = vcpu->arch.apic;
-                u16 cid, lid;
+                struct kvm_lapic **cluster;
+                u16 mask;
                 u32 ldr, aid;
 
                 if (!kvm_apic_present(vcpu))
@@ -161,7 +177,7 @@ static void recalculate_apic_map(struct kvm *kvm)
                 aid = kvm_apic_id(apic);
                 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
-                if (aid < ARRAY_SIZE(new->phys_map))
+                if (aid < size)
                         new->phys_map[aid] = apic;
 
                 if (apic_x2apic_mode(apic)) {
@@ -174,13 +190,11 @@ static void recalculate_apic_map(struct kvm *kvm)
                         new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                 }
 
-                if (!kvm_apic_logical_map_valid(new))
+                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                         continue;
 
-                apic_logical_id(new, ldr, &cid, &lid);
-
-                if (lid && cid < ARRAY_SIZE(new->logical_map))
-                        new->logical_map[cid][ffs(lid) - 1] = apic;
+                if (mask)
+                        cluster[ffs(mask) - 1] = apic;
         }
 out:
         old = rcu_dereference_protected(kvm->arch.apic_map,
@@ -684,7 +698,6 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, struct kvm_lapic
 {
         int i, lowest;
         bool x2apic_ipi;
-        u16 cid;
 
         if (irq->shorthand == APIC_DEST_SELF) {
                 *dst = &src;
@@ -701,7 +714,7 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, struct kvm_lapic
                 return false;
 
         if (irq->dest_mode == APIC_DEST_PHYSICAL) {
-                if (irq->dest_id >= ARRAY_SIZE(map->phys_map)) {
+                if (irq->dest_id >= map->size) {
                         *bitmap = 0;
                 } else {
                         *dst = &map->phys_map[irq->dest_id];
@@ -710,18 +723,11 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm, struct kvm_lapic
                 return true;
         }
 
-        if (!kvm_apic_logical_map_valid(map))
+        *bitmap = 0;
+        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
+                        (u16 *)bitmap))
                 return false;
 
-        apic_logical_id(map, irq->dest_id, &cid, (u16 *)bitmap);
-
-        if (cid >= ARRAY_SIZE(map->logical_map)) {
-                *bitmap = 0;
-                return true;
-        }
-
-        *dst = map->logical_map[cid];
-
         if (!kvm_lowest_prio_delivery(irq))
                 return true;
 
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 336ba51bb16e..8d811139d2b3 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -200,7 +200,7 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
         return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
 }
 
-static inline int kvm_apic_id(struct kvm_lapic *apic)
+static inline u32 kvm_apic_id(struct kvm_lapic *apic)
 {
         return (kvm_lapic_get_reg(apic, APIC_ID) >> 24) & 0xff;
 }
-- 
2.9.0