On Fri, Aug 27, 2021, Vitaly Kuznetsov wrote:
> Allocating cpumask dynamically in zalloc_cpumask_var() is not ideal.
> Allocation is somewhat slow and can (in theory and when CPUMASK_OFFSTACK)
> fail. kvm_make_all_cpus_request_except() already disables preemption so
> we can use pre-allocated per-cpu cpumasks instead.
> 
> Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
> ---
>  virt/kvm/kvm_main.c | 29 +++++++++++++++++++++++------
>  1 file changed, 23 insertions(+), 6 deletions(-)
> 
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 2e9927c4eb32..2f5fe4f54a51 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
>  static unsigned long long kvm_createvm_count;
>  static unsigned long long kvm_active_vms;
>  
> +static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);
> +
>  __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
>  						   unsigned long start, unsigned long end)
>  {
> @@ -323,14 +325,15 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
>  				      struct kvm_vcpu *except)
>  {
>  	struct kvm_vcpu *vcpu;
> -	cpumask_var_t cpus;
> +	struct cpumask *cpus;
>  	bool called;
>  	int i, me;
>  
> -	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
> -
>  	me = get_cpu();
>  
> +	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
> +	cpumask_clear(cpus);
> +
>  	kvm_for_each_vcpu(i, vcpu, kvm) {
>  		if (vcpu == except)
>  			continue;
> @@ -340,7 +343,6 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
>  	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
>  	put_cpu();
>  
> -	free_cpumask_var(cpus);
>  	return called;
>  }
>  
> @@ -5581,9 +5583,15 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
>  		goto out_free_3;
>  	}
>  
> +	for_each_possible_cpu(cpu) {
> +		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
> +					    GFP_KERNEL, cpu_to_node(cpu)))
> +			goto out_free_4;

'r' needs to be explicitly set to -ENOMEM, e.g. in the current code it's
guaranteed to be 0 here.

> +	}
> +
>  	r = kvm_async_pf_init();
>  	if (r)
> -		goto out_free;
> +		goto out_free_5;
>  
>  	kvm_chardev_ops.owner = module;
>  	kvm_vm_fops.owner = module;
> @@ -5609,7 +5617,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
>  
>  out_unreg:
>  	kvm_async_pf_deinit();
> -out_free:
> +out_free_5:
> +	for_each_possible_cpu(cpu) {

Unnecessary braces.

> +		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
> +	}
> +out_free_4:
>  	kmem_cache_destroy(kvm_vcpu_cache);
>  out_free_3:
>  	unregister_reboot_notifier(&kvm_reboot_notifier);
> @@ -5629,8 +5641,13 @@ EXPORT_SYMBOL_GPL(kvm_init);
>  
>  void kvm_exit(void)
>  {
> +	int cpu;
> +
>  	debugfs_remove_recursive(kvm_debugfs_dir);
>  	misc_deregister(&kvm_dev);
> +	for_each_possible_cpu(cpu) {

Same here.

> +		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
> +	}
>  	kmem_cache_destroy(kvm_vcpu_cache);
>  	kvm_async_pf_deinit();
>  	unregister_syscore_ops(&kvm_syscore_ops);
> -- 
> 2.31.1
> 
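
To illustrate the 'r' comment above: a minimal, untested sketch of how the
allocation loop could report failure before jumping to the unwind label. The
-ENOMEM value and exact placement are assumptions on my part, not part of the
posted patch:

	for_each_possible_cpu(cpu) {
		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu))) {
			/*
			 * 'r' is still 0 from the last successful call above,
			 * so set an explicit error before unwinding, otherwise
			 * kvm_init() would report success on a failed init.
			 */
			r = -ENOMEM;
			goto out_free_4;
		}
	}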