Implement KVM_CAP_MEMORY_FAULT_INFO for uaccess failures within
kvm_vcpu_read_guest_page().

Signed-off-by: Anish Moorthy <amoorthy@xxxxxxxxxx>
---
 virt/kvm/kvm_main.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ea27a8178f1a..b9d2606f9251 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2986,9 +2986,12 @@ static int next_segment(unsigned long len, int offset)
 
 /*
  * Copy 'len' bytes from guest memory at '(gfn * PAGE_SIZE) + offset' to 'data'
+ * If 'vcpu' is non-null, then may fill its run struct for a
+ * KVM_EXIT_MEMORY_FAULT on uaccess failure.
  */
-static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
-				 void *data, int offset, int len)
+static int __kvm_read_guest_page(struct kvm_memory_slot *slot,
+				 struct kvm_vcpu *vcpu,
+				 gfn_t gfn, void *data, int offset, int len)
 {
 	int r;
 	unsigned long addr;
@@ -2997,8 +3000,12 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
 	if (kvm_is_error_hva(addr))
 		return -EFAULT;
 	r = __copy_from_user(data, (void __user *)addr + offset, len);
-	if (r)
+	if (r) {
+		if (vcpu)
+			kvm_populate_efault_info(vcpu, gfn * PAGE_SIZE + offset,
+						 len, 0);
 		return -EFAULT;
+	}
 	return 0;
 }
 
@@ -3007,7 +3014,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 {
 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(slot, NULL, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
 
@@ -3016,7 +3023,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
 {
 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
-	return __kvm_read_guest_page(slot, gfn, data, offset, len);
+	return __kvm_read_guest_page(slot, vcpu, gfn, data, offset, len);
}
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
-- 
2.41.0.rc0.172.g3f132b7071-goog
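
For context, a minimal sketch of how a userspace VMM might consume the new
exit information once KVM_CAP_MEMORY_FAULT_INFO is enabled. The memory_fault
field names (gpa/size/flags) and the convention that KVM_RUN returns -EFAULT
with the run struct annotated come from the rest of this series, not from
this patch, so treat them as assumptions:

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* 'vcpu_fd' and 'run' (the mmap'd struct kvm_run) are assumed to be
	 * set up already; only the annotated-EFAULT path is shown. */
	static int handle_run(int vcpu_fd, struct kvm_run *run)
	{
		int ret = ioctl(vcpu_fd, KVM_RUN, 0);

		if (ret < 0 && errno == EFAULT &&
		    run->exit_reason == KVM_EXIT_MEMORY_FAULT) {
			__u64 gpa  = run->memory_fault.gpa;  /* gfn * PAGE_SIZE + offset */
			__u64 size = run->memory_fault.size; /* length of the failed access */

			/* e.g. fault in or remap the backing memory for
			 * [gpa, gpa + size), then re-enter with KVM_RUN. */
		}
		return ret;
	}

Note that a failed __copy_from_user() in __kvm_read_guest_page() only
surfaces to userspace this way when the caller propagates the -EFAULT all
the way out of KVM_RUN.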