The return value of kvm_vcpu_block will be repurposed soon to return
the state of KVM_REQ_UNHALT.  In preparation for that, get rid of the
current return value.  It is only used by kvm_vcpu_halt to decide whether
the call resulted in a wait, but the same effect can be obtained with
a single round of polling.

No functional change intended, apart from practically indistinguishable
changes to the polling behavior.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
 include/linux/kvm_host.h |  2 +-
 virt/kvm/kvm_main.c      | 45 +++++++++++++++++-----------------------
 2 files changed, 20 insertions(+), 27 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c480b1821e1..e7bd48d15db8 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1339,7 +1339,7 @@ void kvm_sigset_activate(struct kvm_vcpu *vcpu);
 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
 
 void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
-bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 515dfe9d3bcf..1f049c1d01b4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3429,10 +3429,9 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
  * pending.  This is mostly used when halting a vCPU, but may also be used
  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
  */
-bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
-	bool waited = false;
 
 	vcpu->stat.generic.blocking = 1;
 
@@ -3447,7 +3446,6 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		if (kvm_vcpu_check_block(vcpu) < 0)
 			break;
 
-		waited = true;
 		schedule();
 	}
 
@@ -3457,8 +3455,6 @@ bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	preempt_enable();
 
 	vcpu->stat.generic.blocking = 0;
-
-	return waited;
 }
 
 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
@@ -3493,35 +3489,32 @@ void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
-	ktime_t start, cur, poll_end;
+	ktime_t start, cur, poll_end, stop;
 	bool waited = false;
 	u64 halt_ns;
 
 	start = cur = poll_end = ktime_get();
-	if (do_halt_poll) {
-		ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
+	stop = do_halt_poll ? ktime_add_ns(start, vcpu->halt_poll_ns) : start;
 
-		do {
-			/*
-			 * This sets KVM_REQ_UNHALT if an interrupt
-			 * arrives.
-			 */
-			if (kvm_vcpu_check_block(vcpu) < 0)
-				goto out;
-			cpu_relax();
-			poll_end = cur = ktime_get();
-		} while (kvm_vcpu_can_poll(cur, stop));
-	}
+	do {
+		/*
+		 * This sets KVM_REQ_UNHALT if an interrupt
+		 * arrives.
+		 */
+		if (kvm_vcpu_check_block(vcpu) < 0)
+			goto out;
+		cpu_relax();
+		poll_end = cur = ktime_get();
+	} while (kvm_vcpu_can_poll(cur, stop));
 
-	waited = kvm_vcpu_block(vcpu);
+	waited = true;
+	kvm_vcpu_block(vcpu);
 
 	cur = ktime_get();
-	if (waited) {
-		vcpu->stat.generic.halt_wait_ns +=
-			ktime_to_ns(cur) - ktime_to_ns(poll_end);
-		KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
-					  ktime_to_ns(cur) - ktime_to_ns(poll_end));
-	}
+	vcpu->stat.generic.halt_wait_ns +=
+		ktime_to_ns(cur) - ktime_to_ns(poll_end);
+	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
+				  ktime_to_ns(cur) - ktime_to_ns(poll_end));
 
 out:
 	/* The total time the vCPU was "halted", including polling time. */
 	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
-- 
2.31.1
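
Not part of the patch: a minimal standalone sketch of the "poll at least
once" pattern the rewritten kvm_vcpu_halt relies on, written against plain
POSIX clocks instead of ktime.  The names poll_for_event, event_pending and
now_ns are invented for illustration only.  The point is that when polling
is disabled the deadline equals the start timestamp, so the do/while body
still runs exactly one round before the caller goes on to block.

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical stand-in for kvm_vcpu_check_block(): reports a pending wake event. */
static bool event_pending(void)
{
	return false;
}

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Poll for up to poll_window_ns, or for a single round when do_poll is false. */
static bool poll_for_event(bool do_poll, int64_t poll_window_ns)
{
	int64_t start = now_ns();
	/* Deadline collapses to "start" when polling is disabled: one round only. */
	int64_t stop = do_poll ? start + poll_window_ns : start;
	int64_t cur;

	do {
		if (event_pending())
			return true;	/* the kernel code does "goto out" here */
		cur = now_ns();
	} while (cur < stop);		/* plays the role of kvm_vcpu_can_poll() */

	return false;			/* caller would now block, as kvm_vcpu_block() does */
}

int main(void)
{
	/* Even with polling disabled, the event check still runs exactly once. */
	return poll_for_event(false, 1000000) ? 0 : 1;
}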