As part of KVM_RUN, we may need to write steal-time information to
guest memory. If the gfn we are writing to is userfault-enabled, we
should return to userspace with fault information. With asynchronous
userfaults, this change is not strictly necessary; it merely acts as
an optimization.

Signed-off-by: James Houghton <jthoughton@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  2 +-
 arch/arm64/kvm/arm.c              |  8 ++++++--
 arch/arm64/kvm/pvtime.c           | 12 ++++++++++--
 3 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 36b8e97bf49e..4c7bd72ba9e8 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1166,7 +1166,7 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
 
 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
-void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+int kvm_update_stolen_time(struct kvm_vcpu *vcpu);
 
 bool kvm_arm_pvtime_supported(void);
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 59716789fe0f..4c7994e44217 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -974,8 +974,12 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
 		 */
 		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
 
-		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
-			kvm_update_stolen_time(vcpu);
+		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) {
+			int ret = kvm_update_stolen_time(vcpu);
+
+			if (ret <= 0)
+				return ret;
+		}
 
 		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
 			/* The distributor enable bits were changed */
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index 4ceabaa4c30b..ba0164726310 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -10,7 +10,7 @@
 
 #include <kvm/arm_hypercalls.h>
 
-void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
+int kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
 	u64 base = vcpu->arch.steal.base;
@@ -20,9 +20,15 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 	int idx;
 
 	if (base == INVALID_GPA)
-		return;
+		return 1;
 
 	idx = srcu_read_lock(&kvm->srcu);
+	if (gfn_to_hva(kvm, gpa_to_gfn(base + offset)) == KVM_HVA_ERR_USERFAULT) {
+		kvm_prepare_memory_fault_exit(vcpu, base + offset, PAGE_SIZE,
+					      true, false, false, true);
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EFAULT;
+	}
 	if (!kvm_get_guest(kvm, base + offset, steal)) {
 		steal = le64_to_cpu(steal);
 		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
@@ -30,6 +36,8 @@ void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
 	}
 	srcu_read_unlock(&kvm->srcu, idx);
+
+	return 1;
 }
 
 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
-- 
2.45.2.993.g49e7a77208-goog
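
On the userspace side, this exit is surfaced as KVM_EXIT_MEMORY_FAULT.
A minimal sketch of how a VMM might consume it, assuming the
KVM_MEMORY_EXIT_FLAG_USERFAULT flag added earlier in this series;
resolve_userfault() is a hypothetical helper, not part of this patch:

	#include <stdint.h>
	#include <linux/kvm.h>

	/*
	 * Hypothetical helper: populate the page(s) backing
	 * [gpa, gpa + len) and clear the userfault state for that range
	 * before the vCPU is re-entered.
	 */
	int resolve_userfault(uint64_t gpa, uint64_t len);

	static int handle_memory_fault_exit(struct kvm_run *run)
	{
		uint64_t gpa = run->memory_fault.gpa;
		uint64_t len = run->memory_fault.size;

		if (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_USERFAULT)
			return resolve_userfault(gpa, len);

		return -1;	/* some other fault type; not handled here */
	}

Note that KVM delivers KVM_EXIT_MEMORY_FAULT with the KVM_RUN ioctl
itself failing with -EFAULT, so the VMM must check run->exit_reason on
that error rather than treating it as fatal. Once the fault is
resolved, re-entering the vCPU retries the steal-time update, since
kvm_arch_vcpu_load() re-makes KVM_REQ_RECORD_STEAL when pvtime is
enabled.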