From: Sean Christopherson <seanjc@xxxxxxxxxx>

commit 19979fba9bfaeab427a8e106d915f0627c952828 upstream.

Remove the disabling of page faults across kvm_steal_time_set_preempted()
as KVM now accesses the steal time struct (shared with the guest) via a
cached mapping (see commit b043138246a4, "x86/KVM: Make sure
KVM_VCPU_FLUSH_TLB flag is not missed".)  The cache lookup is flagged as
atomic, thus it would be a bug if KVM tried to resolve a new pfn, i.e.
we want the splat that would be reached via might_fault().

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Message-Id: <20210123000334.3123628-2-seanjc@xxxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Signed-off-by: Rishabh Bhatnagar <risbhat@xxxxxxxxxx>
Tested-by: Allen Pais <apais@xxxxxxxxxxxxxxxxxxx>
Acked-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/x86.c | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b0379ff4b743..4e77233880b7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4127,15 +4127,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	if (vcpu->preempted)
 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
 
-	/*
-	 * Disable page faults because we're in atomic context here.
-	 * kvm_write_guest_offset_cached() would call might_fault()
-	 * that relies on pagefault_disable() to tell if there's a
-	 * bug. NOTE: the write to guest memory may not go through if
-	 * during postcopy live migration or if there's heavy guest
-	 * paging.
-	 */
-	pagefault_disable();
 	/*
 	 * kvm_memslots() will be called by
 	 * kvm_write_guest_offset_cached() so take the srcu lock.
@@ -4143,7 +4134,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	kvm_steal_time_set_preempted(vcpu);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
-	pagefault_enable();
 	kvm_x86_ops.vcpu_put(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 	/*
-- 
2.39.2
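
For reference, the "splat" the commit message wants back comes from
might_fault(), whose check is short-circuited while page faults are
disabled.  A simplified sketch of the relevant logic in mm/memory.c of
this era (trimmed to the two checks that matter here, not the verbatim
source):

	void __might_fault(const char *file, int line)
	{
		/* No warning while pagefault_disable() is in effect. */
		if (pagefault_disabled())
			return;
		/* Warns (splats) if we could sleep in atomic context. */
		__might_sleep(file, line, 0);
	}

Wrapping kvm_steal_time_set_preempted() in pagefault_disable() therefore
hit the early return and masked exactly the warning that is now wanted:
because the steal time struct is reached through an atomic cached
mapping, any attempt to fault in a new pfn is a KVM bug, and with the
pagefault_disable()/pagefault_enable() pair removed, might_fault() will
flag it.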