The kvm struct has been bloating. For example, it's tens of kilobytes for x86,
which turns out to be a large amount of memory to allocate contiguously via
kzalloc. Thus, this patch does the following:
1. Uses architecture-specific routines to allocate the kvm struct via vzalloc
   for x86, and also for arm when has_vhe() is true.
2. Introduces a baseline allocator for the kvm struct that uses vzalloc. An
   architecture can use this allocator by defining the __KVM_VALLOC_ARCH_VM
   macro before include/linux/kvm_host.h is included.
3. Finally, continues to default to kzalloc for all other situations, as many
   architectures have taken a dependence on kzalloc.

Signed-off-by: Marc Orr <marcorr@xxxxxxxxxx>
Change-Id: Ide392e51991142d1325e06351231ac685e11d820
---
 arch/arm/include/asm/kvm_host.h   |  4 ++++
 arch/arm64/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/svm.c                |  4 ++--
 arch/x86/kvm/vmx.c                |  4 ++--
 include/linux/kvm_host.h          | 13 +++++++++++++
 virt/kvm/arm/arm.c                | 15 +++++++++++++++
 6 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c7c28c885a19..f38fd60b4d4d 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -318,4 +318,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 469de8acd06f..358b1b4dcd92 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -455,4 +455,8 @@ static inline bool kvm_arm_harden_branch_predictor(void)
 void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
 void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1fc05e428aba..45b903ed3e5d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1849,13 +1849,13 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
 
 static struct kvm *svm_vm_alloc(void)
 {
-	struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
+	struct kvm_svm *kvm_svm = vzalloc(sizeof(struct kvm_svm));
 
 	return &kvm_svm->kvm;
 }
 
 static void svm_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_svm(kvm));
+	vfree(to_kvm_svm(kvm));
 }
 
 static void sev_vm_destroy(struct kvm *kvm)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3f1696570b41..ee85ba887db6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9936,13 +9936,13 @@ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
 
 static struct kvm *vmx_vm_alloc(void)
 {
-	struct kvm_vmx *kvm_vmx = kzalloc(sizeof(struct kvm_vmx), GFP_KERNEL);
+	struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
 
 	return &kvm_vmx->kvm;
 }
 
 static void vmx_vm_free(struct kvm *kvm)
 {
-	kfree(to_kvm_vmx(kvm));
+	vfree(to_kvm_vmx(kvm));
 }
 
 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d6e79c59e68..e3134283321a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -19,6 +19,7 @@
 #include <linux/preempt.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/rcupdate.h>
 #include <linux/ratelimit.h>
 #include <linux/err.h>
@@ -808,6 +809,17 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+#ifdef __KVM_VALLOC_ARCH_VM
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return vzalloc(sizeof(struct kvm));
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	vfree(kvm);
+}
+#else
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
 	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -818,6 +830,7 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
 	kfree(kvm);
 }
 #endif
+#endif
 
 #ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a4c1b76240df..ee96f18f852a 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -249,6 +249,21 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+struct kvm *kvm_arch_alloc_vm(void)
+{
+	if (!has_vhe())
+		return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+
+	return vzalloc(sizeof(struct kvm));
+}
+
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+	if (!has_vhe())
+		kfree(kvm);
+	else
+		vfree(kvm);
+}
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 {
-- 
2.17.0.441.gb46fe60e1d-goog
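
Note (illustration only, not part of the patch above): the sketch below shows
how an architecture would opt into the vzalloc-based baseline allocator from
point 2, assuming a hypothetical arch "foo"; it is a usage example, not code
being proposed here. The arm/arm64 headers in this patch instead define
__KVM_HAVE_ARCH_VM_ALLOC and provide their own kvm_arch_alloc_vm(), because
they need the has_vhe() check at runtime.

/* arch/foo/include/asm/kvm_host.h (hypothetical example) */

/*
 * Opt in to the vzalloc-based default allocator. This define must be
 * visible before the #ifndef __KVM_HAVE_ARCH_VM_ALLOC block in
 * include/linux/kvm_host.h, which includes this arch header near its top.
 */
#define __KVM_VALLOC_ARCH_VM

/*
 * With this in place, the generic kvm_arch_alloc_vm()/kvm_arch_free_vm()
 * helpers expand to vzalloc(sizeof(struct kvm)) and vfree(kvm) instead of
 * kzalloc()/kfree().
 */

Keeping kzalloc as the default means architectures that depend on the kvm
struct being kmalloc'd (e.g. because kmalloc memory is physically contiguous)
see no change in behavior unless they explicitly opt in.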