On Mon, Feb 26, 2024 at 12:26:39AM -0800, isaku.yamahata@xxxxxxxxx wrote:
>@@ -10162,18 +10151,49 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
>
> 		WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
> 		vcpu->arch.complete_userspace_io = complete_hypercall_exit;
>+		/* stat is incremented on completion. */

Perhaps we could use a distinct return value to signal that the request is
redirected to userspace. This way, more cases can be supported, e.g., accesses
to MTRR MSRs, requests to service TDs, etc. And then ...

> 		return 0;
> 	}
> 	default:
> 		ret = -KVM_ENOSYS;
> 		break;
> 	}
>+
> out:
>+	++vcpu->stat.hypercalls;
>+	return ret;
>+}
>+EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
>+
>+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
>+{
>+	unsigned long nr, a0, a1, a2, a3, ret;
>+	int op_64_bit;
>+	int cpl;
>+
>+	if (kvm_xen_hypercall_enabled(vcpu->kvm))
>+		return kvm_xen_hypercall(vcpu);
>+
>+	if (kvm_hv_hypercall_enabled(vcpu))
>+		return kvm_hv_hypercall(vcpu);
>+
>+	nr = kvm_rax_read(vcpu);
>+	a0 = kvm_rbx_read(vcpu);
>+	a1 = kvm_rcx_read(vcpu);
>+	a2 = kvm_rdx_read(vcpu);
>+	a3 = kvm_rsi_read(vcpu);
>+	op_64_bit = is_64_bit_hypercall(vcpu);
>+	cpl = static_call(kvm_x86_get_cpl)(vcpu);
>+
>+	ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
>+	if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
>+		/* MAP_GPA tosses the request to the user space. */

No need to check what the request is. Just checking the return value will
suffice.

>+		return 0;
>+
> 	if (!op_64_bit)
> 		ret = (u32)ret;
> 	kvm_rax_write(vcpu, ret);
>
>-	++vcpu->stat.hypercalls;
> 	return kvm_skip_emulated_instruction(vcpu);
> }
> EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
>-- 
>2.25.1
>
>
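
Something along these lines, perhaps (rough, untested sketch; the name
KVM_HCALL_EXIT_TO_USERSPACE is made up here, and it assumes
__kvm_emulate_hypercall() gets reworked to truncate and write the guest
return value into RAX itself, so that its return value carries only a
KVM-internal code):

#define KVM_HCALL_EXIT_TO_USERSPACE	1

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3;
	int op_64_bit, cpl, r;

	if (kvm_xen_hypercall_enabled(vcpu->kvm))
		return kvm_xen_hypercall(vcpu);

	if (kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	nr = kvm_rax_read(vcpu);
	a0 = kvm_rbx_read(vcpu);
	a1 = kvm_rcx_read(vcpu);
	a2 = kvm_rdx_read(vcpu);
	a3 = kvm_rsi_read(vcpu);
	op_64_bit = is_64_bit_hypercall(vcpu);
	cpl = static_call(kvm_x86_get_cpl)(vcpu);

	r = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);

	/*
	 * The request was handed off to userspace; the completion callback
	 * will finish it, so don't skip the instruction or touch RAX here.
	 */
	if (r == KVM_HCALL_EXIT_TO_USERSPACE)
		return 0;

	/* In this sketch the helper already truncated and wrote RAX. */
	return kvm_skip_emulated_instruction(vcpu);
}

That way the caller doesn't need to know which hypercalls can exit to
userspace, which should also make it easier to add the other
exit-to-userspace cases (MTRR MSRs, service-TD requests) mentioned above.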