Instead of using an atomic operation per active request, use just one
to get all requests at once, then check them with local ops.

This probably isn't any faster, since simultaneous requests are rare,
but it does reduce code size.

Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>
---
 arch/x86/kvm/x86.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 953e692..c0209eb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5232,55 +5232,58 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
 	bool req_immediate_exit = 0;
+	ulong reqs;
 
 	if (unlikely(req_int_win))
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	if (vcpu->requests) {
-		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
+		reqs = xchg(&vcpu->requests, 0UL);
+
+		if (test_bit(KVM_REQ_MMU_RELOAD, &reqs))
 			kvm_mmu_unload(vcpu);
-		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
+		if (test_bit(KVM_REQ_MIGRATE_TIMER, &reqs))
 			__kvm_migrate_timers(vcpu);
-		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+		if (test_bit(KVM_REQ_CLOCK_UPDATE, &reqs)) {
 			r = kvm_guest_time_update(vcpu);
 			if (unlikely(r))
 				goto out;
 		}
-		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
+		if (test_bit(KVM_REQ_MMU_SYNC, &reqs))
 			kvm_mmu_sync_roots(vcpu);
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+		if (test_bit(KVM_REQ_TLB_FLUSH, &reqs))
 			kvm_x86_ops->tlb_flush(vcpu);
-		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
+		if (test_bit(KVM_REQ_REPORT_TPR_ACCESS, &reqs)) {
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+		if (test_bit(KVM_REQ_TRIPLE_FAULT, &reqs)) {
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
+		if (test_bit(KVM_REQ_DEACTIVATE_FPU, &reqs)) {
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
-		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+		if (test_bit(KVM_REQ_APF_HALT, &reqs)) {
 			/* Page is swapped out. Do synthetic halt */
 			vcpu->arch.apf.halted = true;
 			r = 1;
 			goto out;
 		}
-		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+		if (test_bit(KVM_REQ_STEAL_UPDATE, &reqs))
 			record_steal_time(vcpu);
-		if (kvm_check_request(KVM_REQ_NMI, vcpu))
+		if (test_bit(KVM_REQ_NMI, &reqs))
 			process_nmi(vcpu);
 		req_immediate_exit =
-			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-		if (kvm_check_request(KVM_REQ_PMU, vcpu))
+			test_bit(KVM_REQ_IMMEDIATE_EXIT, &reqs);
+		if (test_bit(KVM_REQ_PMU, &reqs))
			kvm_handle_pmu_event(vcpu);
-		if (kvm_check_request(KVM_REQ_PMI, vcpu))
+		if (test_bit(KVM_REQ_PMI, &reqs))
 			kvm_deliver_pmi(vcpu);
-		if (kvm_check_request(KVM_REQ_EVENT, vcpu)) {
+		if (test_bit(KVM_REQ_EVENT, &reqs)) {
 			inject_pending_event(vcpu);
 
 			/* enable NMI/IRQ window open exits if needed */
-- 
1.7.10.1
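
The fetch-then-test idea generalizes beyond KVM. Below is a minimal,
self-contained sketch of the same pattern; the REQ_* bits, handlers,
and function names are hypothetical, and C11 atomics stand in for the
kernel's xchg() and test_bit():

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical request bits; KVM's real KVM_REQ_* constants differ. */
#define REQ_TLB_FLUSH		0
#define REQ_CLOCK_UPDATE	1

static atomic_ulong requests;

/* Requester side: one atomic RMW sets the request bit. */
static void make_request(int req)
{
	atomic_fetch_or(&requests, 1UL << req);
}

/*
 * Consumer side: a single atomic exchange snapshots and clears every
 * pending request at once; the bits are then tested with plain local
 * operations instead of one atomic test-and-clear per request.
 */
static void process_requests(void)
{
	unsigned long reqs = atomic_exchange(&requests, 0UL);

	if (reqs & (1UL << REQ_TLB_FLUSH))
		puts("flushing TLB");
	if (reqs & (1UL << REQ_CLOCK_UPDATE))
		puts("updating clock");
}

int main(void)
{
	make_request(REQ_TLB_FLUSH);
	make_request(REQ_CLOCK_UPDATE);
	process_requests();	/* one atomic op handles both requests */
	return 0;
}

In the sketch, a request posted after the atomic exchange simply stays
set in the word and is picked up on the next pass, so clearing all bits
in one go loses nothing.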