....

> +static int private_memslot_fault(struct kvm_vcpu *vcpu,
> + phys_addr_t fault_ipa,
> + struct kvm_memory_slot *memslot)
> +{
> + struct kvm *kvm = vcpu->kvm;
> + gpa_t gpa = kvm_gpa_from_fault(kvm, fault_ipa);
> + gfn_t gfn = gpa >> PAGE_SHIFT;
> + bool priv_exists = kvm_mem_is_private(kvm, gfn);
> + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
> + kvm_pfn_t pfn;
> + int ret;
> + /*
> + * For Realms, the shared address is an alias of the private GPA with
> + * the top bit set. Thus is the fault address matches the GPA then it
> + * is the private alias.
> + */
> + bool is_priv_gfn = (gpa == fault_ipa);
> +
> + if (priv_exists != is_priv_gfn) {
> + kvm_prepare_memory_fault_exit(vcpu,
> + gpa,
> + PAGE_SIZE,
> + kvm_is_write_fault(vcpu),
> + false, is_priv_gfn);
> +
> + return -EFAULT;
> + }

If we want to exit to the VMM and have it handle the fault, shouldn't the
return value be 0 rather than -EFAULT? For kvmtool, the KVM_RUN ioctl is
handled as below:

	err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
	if (err < 0 && (errno != EINTR && errno != EAGAIN))
		die_perror("KVM_RUN failed");

QEMU did end up adding the below condition:

	if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) {
		fprintf(stderr, "error: kvm run failed %s\n", strerror(-run_ret));

So should we fix kvmtool instead? We may want to add other exit_reasons
later, and it would be useful not to require similar VMM changes for each
of those exit_reasons.

> +
> + if (!is_priv_gfn) {
> + /* Not a private mapping, handling normally */
> + return -EINVAL;
> + }
> +

-aneesh