Implement the vcpu handle exit interface: get the exit code from the
ESTAT register and use the kvm exception vector to handle it.

Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
---
 arch/loongarch/kvm/vcpu.c | 86 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 571ac8b9d..e08a4faa0 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -38,6 +38,92 @@ static int _kvm_check_requests(struct kvm_vcpu *vcpu, int cpu)
 	return ret;
 }
 
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+static int _kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+	unsigned long exst = vcpu->arch.host_estat;
+	u32 intr = exst & 0x1fff; /* ignore NMI */
+	u32 exccode = (exst & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+	int ret = RESUME_GUEST, cpu;
+
+	vcpu->mode = OUTSIDE_GUEST_MODE;
+
+	/* Set a default exit reason */
+	run->exit_reason = KVM_EXIT_UNKNOWN;
+	run->ready_for_interrupt_injection = 1;
+
+	/*
+	 * Re-enable host interrupts before handling the exit:
+	 * the handlers below may sleep or reschedule
+	 */
+
+	local_irq_enable();
+
+	kvm_debug("%s: exst: %lx, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+			__func__, exst, opc, run, vcpu);
+	trace_kvm_exit(vcpu, exccode);
+	if (exccode) {
+		ret = _kvm_handle_fault(vcpu, exccode);
+	} else {
+		WARN(!intr, "suspicious vm exit");
+		++vcpu->stat.int_exits;
+
+		if (need_resched())
+			cond_resched();
+
+		ret = RESUME_GUEST;
+	}
+
+	cond_resched();
+
+	local_irq_disable();
+
+	if (ret == RESUME_GUEST)
+		kvm_acquire_timer(vcpu);
+
+	if (!(ret & RESUME_HOST)) {
+		_kvm_deliver_intr(vcpu);
+		/* Only check for signals if not already exiting to userspace */
+		if (signal_pending(current)) {
+			run->exit_reason = KVM_EXIT_INTR;
+			ret = (-EINTR << 2) | RESUME_HOST;
+			++vcpu->stat.signal_exits;
+			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
+		}
+	}
+
+	if (ret == RESUME_GUEST) {
+		trace_kvm_reenter(vcpu);
+
+		/*
+		 * Make sure the read of VCPU requests in vcpu_reenter()
+		 * callback is not reordered ahead of the write to vcpu->mode,
+		 * or we could miss a TLB flush request while the requester sees
+		 * the VCPU as outside of guest mode and not needing an IPI.
+		 */
+		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+
+		cpu = smp_processor_id();
+		_kvm_check_requests(vcpu, cpu);
+		_kvm_check_vmid(vcpu, cpu);
+		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
+
+		/*
+		 * If the FPU is enabled (i.e. the guest's FPU context
+		 * is live), restore FCSR0.
+		 */
+		if (_kvm_guest_has_fpu(&vcpu->arch) &&
+				read_csr_euen() & (CSR_EUEN_FPEN)) {
+			kvm_restore_fcsr(&vcpu->arch.fpu);
+		}
+	}
+
+	return ret;
+}
+
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int i;
-- 
2.31.1
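
P.S. For context, a minimal userspace sketch (not part of the patch) of how a
caller such as the arch vcpu run loop can decode the packed return value
described in the function's header comment. The RESUME_* values are restated
here assuming the usual KVM convention (RESUME_GUEST == 0, RESUME_HOST ==
RESUME_FLAG_HOST == 1 << 1, errcode in bits 2 and up), and decode_resume() is
a hypothetical helper:

#include <errno.h>
#include <stdio.h>

#define RESUME_FLAG_HOST	(1 << 1)	/* assumed convention */
#define RESUME_GUEST		0
#define RESUME_HOST		RESUME_FLAG_HOST

/* Hypothetical helper: unpack (errcode << 2 | RESUME_HOST). */
static int decode_resume(int ret)
{
	if (ret & RESUME_HOST)
		return ret >> 2;	/* recover the packed errcode */
	return 0;			/* RESUME_GUEST: re-enter the guest */
}

int main(void)
{
	int ret = (-EINTR << 2) | RESUME_HOST;	/* as in the signal path */

	printf("errcode = %d\n", decode_resume(ret));	/* prints -4 */
	return 0;
}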
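
Likewise, a hedged sketch of the ordering that smp_store_mb() enforces in the
re-entry path, rewritten with C11 atomics purely for illustration; the names
vcpu_reenter() and kick_needed() are illustrative, not the patch's API. This
is the classic store-buffering pattern: each side stores then loads, and a
full barrier guarantees at least one side observes the other's write, so a
TLB flush request is either handled by the vcpu or followed by an IPI.

#include <stdatomic.h>
#include <stdbool.h>

enum { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

static _Atomic int mode = OUTSIDE_GUEST_MODE;
static _Atomic bool tlb_flush_requested;

/* vcpu thread, about to re-enter the guest */
static bool vcpu_reenter(void)
{
	/* seq_cst store stands in for smp_store_mb(vcpu->mode, ...) */
	atomic_store(&mode, IN_GUEST_MODE);
	/* this load must not be reordered ahead of the store above */
	return atomic_load(&tlb_flush_requested);	/* handle if set */
}

/* requesting thread, e.g. posting a TLB flush request */
static bool kick_needed(void)
{
	atomic_store(&tlb_flush_requested, true);
	/* send an IPI only if the vcpu already entered guest mode */
	return atomic_load(&mode) == IN_GUEST_MODE;
}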