Prepare for supporting very large numbers of vcpus per guest by moving
the vcpu pointer array out of struct kvm.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
V2:
- rebase to new kvm_arch_free_vm() implementation
---
 arch/arm64/kvm/arm.c            | 21 +++++++++++++++++++--
 arch/x86/include/asm/kvm_host.h |  5 +----
 arch/x86/kvm/x86.c              | 18 ++++++++++++++++++
 include/linux/kvm_host.h        | 17 +++++++++++++++--
 4 files changed, 53 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 38fff5963d9f..8bb5caeba007 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -293,10 +293,27 @@ long kvm_arch_dev_ioctl(struct file *filp,
 
 struct kvm *kvm_arch_alloc_vm(void)
 {
+	struct kvm *kvm;
+
+	if (!has_vhe())
+		kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+	else
+		kvm = vzalloc(sizeof(struct kvm));
+
+	if (!kvm)
+		return NULL;
+
 	if (!has_vhe())
-		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+		kvm->vcpus = kcalloc(KVM_MAX_VCPUS, sizeof(void *), GFP_KERNEL);
+	else
+		kvm->vcpus = vzalloc(KVM_MAX_VCPUS * sizeof(void *));
+
+	if (!kvm->vcpus) {
+		kvm_arch_free_vm(kvm);
+		kvm = NULL;
+	}
 
-	return vzalloc(sizeof(struct kvm));
+	return kvm;
 }
 
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f16fadfc030a..6c28d0800208 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1517,10 +1517,7 @@ static inline void kvm_ops_static_call_update(void)
 }
 
 #define __KVM_HAVE_ARCH_VM_ALLOC
-static inline struct kvm *kvm_arch_alloc_vm(void)
-{
-	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
-}
+struct kvm *kvm_arch_alloc_vm(void);
 
 #define __KVM_HAVE_ARCH_VM_FREE
 void kvm_arch_free_vm(struct kvm *kvm);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cc552763f0e4..ff142b6dd00c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11126,6 +11126,24 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 	static_call(kvm_x86_sched_in)(vcpu, cpu);
 }
 
+struct kvm *kvm_arch_alloc_vm(void)
+{
+	struct kvm *kvm;
+
+	kvm = __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!kvm)
+		return NULL;
+
+	kvm->vcpus = __vmalloc(KVM_MAX_VCPUS * sizeof(void *),
+			       GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!kvm->vcpus) {
+		vfree(kvm);
+		kvm = NULL;
+	}
+
+	return kvm;
+}
+
 void kvm_arch_free_vm(struct kvm *kvm)
 {
 	kfree(to_kvm_hv(kvm)->hv_pa_pg);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d75e9c2a00b1..9e2a5f1c6f54 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -536,7 +536,7 @@ struct kvm {
 	struct mutex slots_arch_lock;
 	struct mm_struct *mm; /* userspace tied to this vm */
 	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
-	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+	struct kvm_vcpu **vcpus;
 
 	/*
 	 * created_vcpus is protected by kvm->lock, and is incremented
@@ -1042,12 +1042,25 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm);
  */
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+
+	if (!kvm)
+		return NULL;
+
+	kvm->vcpus = kcalloc(KVM_MAX_VCPUS, sizeof(void *), GFP_KERNEL);
+	if (!kvm->vcpus) {
+		kfree(kvm);
+		kvm = NULL;
+	}
+
+	return kvm;
 }
 #endif
 
 static inline void __kvm_arch_free_vm(struct kvm *kvm)
 {
+	if (kvm)
+		kvfree(kvm->vcpus);
 	kvfree(kvm);
 }
 
-- 
2.26.2
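
For context, callers should not need any change: kvm->vcpus is indexed the
same way whether it is an embedded array or a separately allocated pointer
array. A rough sketch of the generic accessor in kvm_host.h of this era
(paraphrased for illustration only, not part of this patch):

    static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
    {
    	int num_vcpus = atomic_read(&kvm->online_vcpus);

    	i = array_index_nospec(i, num_vcpus);

    	/* Pairs with the barrier in the vcpu creation path. */
    	smp_rmb();

    	/* Indexing works identically for an array and a pointer array. */
    	return kvm->vcpus[i];
    }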