This is a note to let you know that I've just added the patch titled

    KVM: Optimize kvm_make_vcpus_request_mask() a bit

to the 5.10-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     kvm-optimize-kvm_make_vcpus_request_mask-a-bit.patch
and it can be found in the queue-5.10 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 7975423b500182869b3d98ea8dd9f9b2d5cbabe3
Author: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Date:   Fri Sep 3 09:51:37 2021 +0200

    KVM: Optimize kvm_make_vcpus_request_mask() a bit

    [ Upstream commit ae0946cd3601752dc58f86d84258e5361e9c8cd4 ]

    Iterating over set bits in 'vcpu_bitmap' should be faster than going
    through all vCPUs, especially when just a few bits are set.

    Drop kvm_make_vcpus_request_mask() call from
    kvm_make_all_cpus_request_except() to avoid handling the special case
    when 'vcpu_bitmap' is NULL, move the code to
    kvm_make_all_cpus_request_except() itself.

    Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
    Reviewed-by: Sean Christopherson <seanjc@xxxxxxxxxx>
    Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
    Message-Id: <20210903075141.403071-5-vkuznets@xxxxxxxxxx>
    Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
    Stable-dep-of: 2b0128127373 ("KVM: Register /dev/kvm as the _very_ last thing during initialization")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f379398b43d59..34931443dafac 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -264,50 +264,57 @@ static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait)
 	return true;
 }
 
+static void kvm_make_vcpu_request(struct kvm *kvm, struct kvm_vcpu *vcpu,
+				  unsigned int req, cpumask_var_t tmp,
+				  int current_cpu)
+{
+	int cpu;
+
+	kvm_make_request(req, vcpu);
+
+	if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+		return;
+
+	/*
+	 * tmp can be "unavailable" if cpumasks are allocated off stack as
+	 * allocation of the mask is deliberately not fatal and is handled by
+	 * falling back to kicking all online CPUs.
+	 */
+	if (!cpumask_available(tmp))
+		return;
+
+	/*
+	 * Note, the vCPU could get migrated to a different pCPU at any point
+	 * after kvm_request_needs_ipi(), which could result in sending an IPI
+	 * to the previous pCPU.  But, that's OK because the purpose of the
+	 * IPI is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
+	 * satisfied if the vCPU migrates.  Entering READING_SHADOW_PAGE_TABLES
+	 * after this point is also OK, as the requirement is only that KVM
+	 * wait for vCPUs that were reading SPTEs _before_ any changes were
+	 * finalized.  See kvm_vcpu_kick() for more details on handling
+	 * requests.
+	 */
+	if (kvm_request_needs_ipi(vcpu, req)) {
+		cpu = READ_ONCE(vcpu->cpu);
+		if (cpu != -1 && cpu != current_cpu)
+			__cpumask_set_cpu(cpu, tmp);
+	}
+}
+
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 				 struct kvm_vcpu *except,
 				 unsigned long *vcpu_bitmap, cpumask_var_t tmp)
 {
-	int i, cpu, me;
 	struct kvm_vcpu *vcpu;
+	int i, me;
 	bool called;
 
 	me = get_cpu();
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
-		    vcpu == except)
+	for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
+		vcpu = kvm_get_vcpu(kvm, i);
+		if (!vcpu || vcpu == except)
 			continue;
-
-		kvm_make_request(req, vcpu);
-
-		if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
-			continue;
-
-		/*
-		 * tmp can be "unavailable" if cpumasks are allocated off stack
-		 * as allocation of the mask is deliberately not fatal and is
-		 * handled by falling back to kicking all online CPUs.
-		 */
-		if (!cpumask_available(tmp))
-			continue;
-
-		/*
-		 * Note, the vCPU could get migrated to a different pCPU at any
-		 * point after kvm_request_needs_ipi(), which could result in
-		 * sending an IPI to the previous pCPU.  But, that's ok because
-		 * the purpose of the IPI is to ensure the vCPU returns to
-		 * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates.
-		 * Entering READING_SHADOW_PAGE_TABLES after this point is also
-		 * ok, as the requirement is only that KVM wait for vCPUs that
-		 * were reading SPTEs _before_ any changes were finalized. See
-		 * kvm_vcpu_kick() for more details on handling requests.
-		 */
-		if (kvm_request_needs_ipi(vcpu, req)) {
-			cpu = READ_ONCE(vcpu->cpu);
-			if (cpu != -1 && cpu != me)
-				__cpumask_set_cpu(cpu, tmp);
-		}
+		kvm_make_vcpu_request(kvm, vcpu, req, tmp, me);
 	}
 
 	called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT));
@@ -319,12 +326,23 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 				      struct kvm_vcpu *except)
 {
+	struct kvm_vcpu *vcpu;
 	cpumask_var_t cpus;
 	bool called;
+	int i, me;
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
+	me = get_cpu();
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu == except)
+			continue;
+		kvm_make_vcpu_request(kvm, vcpu, req, cpus, me);
+	}
+
+	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
+	put_cpu();
 
 	free_cpumask_var(cpus);
 
 	return called;
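For context on the optimization itself: the win comes from visiting only the
set bits of 'vcpu_bitmap' instead of testing every vCPU index. Below is a
minimal userspace sketch of that pattern, not the kernel implementation; the
next_bit() helper is hypothetical, standing in for what the kernel's
for_each_set_bit() macro does.

#include <stdio.h>

#define NBITS (sizeof(unsigned long) * 8)

/*
 * Hypothetical helper: index of the next set bit at or after 'start',
 * or 'size' if there is none.
 */
static unsigned int next_bit(unsigned long map, unsigned int start,
			     unsigned int size)
{
	unsigned long masked;

	if (start >= size)
		return size;
	masked = map & (~0UL << start);
	return masked ? (unsigned int)__builtin_ctzl(masked) : size;
}

int main(void)
{
	/* Sparse bitmap: only vCPUs 3, 17 and 42 have a request pending. */
	unsigned long vcpu_bitmap = (1UL << 3) | (1UL << 17) | (1UL << 42);
	unsigned int i;

	/* Three iterations, versus 64 for a loop testing every index. */
	for (i = next_bit(vcpu_bitmap, 0, NBITS); i < NBITS;
	     i = next_bit(vcpu_bitmap, i + 1, NBITS))
		printf("send request to vCPU %u\n", i);

	return 0;
}

Built with gcc, this prints only vCPUs 3, 17 and 42, which is the same saving
the patch gets from for_each_set_bit() when only a few vCPUs are targeted.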