Master clock update sets and then clears a request that is never
handled by KVM, purely as a means to block and then allow guest
entries. Extract this into kvm_block_guest_entries() and
kvm_allow_guest_entries().

No functional change intended.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  5 +++--
 arch/x86/kvm/x86.c              | 25 +++++++++++++++----------
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e11d64aa0bcd..cadb09c6fb0e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -68,7 +68,7 @@
 #define KVM_REQ_PMI			KVM_ARCH_REQ(11)
 #define KVM_REQ_SMI			KVM_ARCH_REQ(12)
 #define KVM_REQ_MASTERCLOCK_UPDATE	KVM_ARCH_REQ(13)
-#define KVM_REQ_MCLOCK_INPROGRESS \
+#define KVM_REQ_BLOCK_GUEST_ENTRIES \
 	KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_SCAN_IOAPIC \
 	KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
@@ -1831,7 +1831,8 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
-void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_block_guest_entries(struct kvm *kvm);
+void kvm_allow_guest_entries(struct kvm *kvm);
 void kvm_make_scan_ioapic_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 76dae88cf524..9af2fbbe0521 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2735,9 +2735,18 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 #endif
 }
 
-void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+void kvm_block_guest_entries(struct kvm *kvm)
 {
-	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_BLOCK_GUEST_ENTRIES);
+}
+
+void kvm_allow_guest_entries(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_clear_request(KVM_REQ_BLOCK_GUEST_ENTRIES, vcpu);
 }
 
 static void kvm_gen_update_masterclock(struct kvm *kvm)
@@ -2750,9 +2759,8 @@
 
 	kvm_hv_invalidate_tsc_page(kvm);
 
-	kvm_make_mclock_inprogress_request(kvm);
+	kvm_block_guest_entries(kvm);
 
-	/* no guest entries from this point */
 	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
 	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
@@ -2760,9 +2768,7 @@
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
-	/* guest entries allowed */
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+	kvm_allow_guest_entries(kvm);
 #endif
 }
 
@@ -8051,7 +8057,7 @@ static void kvm_hyperv_tsc_notifier(void)
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
-		kvm_make_mclock_inprogress_request(kvm);
+		kvm_block_guest_entries(kvm);
 
 	hyperv_stop_tsc_emulation();
 
@@ -8070,8 +8076,7 @@
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
-		kvm_for_each_vcpu(cpu, vcpu, kvm)
-			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+		kvm_allow_guest_entries(kvm);
 	}
 	mutex_unlock(&kvm_lock);
 }
-- 
2.26.3
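
(Not part of the patch, just context on why an unhandled request can gate
entries: KVM's entry path bails out of the run loop whenever any request
bit is still pending on a vCPU, so a request that no handler consumes
keeps vCPUs out of guest mode until it is explicitly cleared, and
kvm_make_all_cpus_request() with KVM_REQUEST_WAIT additionally kicks
running vCPUs and waits until they have left guest mode. The userspace
sketch below illustrates only that pattern; every name in it (fake_vcpu,
try_enter_guest, REQ_*) is hypothetical, and none of it is KVM code.)

/*
 * Toy model of the "unhandled request blocks entries" pattern:
 * an entry is refused while any request bit is pending, and one
 * of the bits deliberately has no handler, so it stays pending
 * until explicitly cleared.
 */
#include <stdatomic.h>
#include <stdio.h>

#define REQ_CLOCK_UPDATE	(1u << 0)	/* has a handler */
#define REQ_BLOCK_ENTRIES	(1u << 1)	/* deliberately unhandled */

struct fake_vcpu {
	atomic_uint requests;
};

static int try_enter_guest(struct fake_vcpu *vcpu)
{
	unsigned int reqs = atomic_load(&vcpu->requests);

	/* Consume the requests we know how to handle. */
	if (reqs & REQ_CLOCK_UPDATE) {
		atomic_fetch_and(&vcpu->requests, ~REQ_CLOCK_UPDATE);
		reqs &= ~REQ_CLOCK_UPDATE;
	}

	/* Anything still pending aborts the entry. */
	if (reqs)
		return -1;	/* bail out, retry later */

	return 0;		/* "entered the guest" */
}

int main(void)
{
	struct fake_vcpu vcpu = { .requests = REQ_CLOCK_UPDATE };

	printf("entry with handled request: %d\n", try_enter_guest(&vcpu));

	atomic_fetch_or(&vcpu.requests, REQ_BLOCK_ENTRIES);
	printf("entry while blocked:        %d\n", try_enter_guest(&vcpu));

	atomic_fetch_and(&vcpu.requests, ~REQ_BLOCK_ENTRIES);
	printf("entry after allow:          %d\n", try_enter_guest(&vcpu));
	return 0;
}

Built with any C11 compiler, this prints 0, -1, 0: the handled request is
consumed on entry, while the unhandled one blocks entries until it is
cleared, mirroring what kvm_block_guest_entries()/kvm_allow_guest_entries()
do with KVM_REQ_BLOCK_GUEST_ENTRIES.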