Nicholas Piggin <npiggin@xxxxxxxxx> writes:

> In order to support hash guests in the P9 path (which does not do real
> mode hcalls or page fault handling), these real-mode hash specific
> interrupts need to be implemented in virt mode.
>
> Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
> ---
>  arch/powerpc/kvm/book3s_hv.c | 118 +++++++++++++++++++++++++++++++++--
>  1 file changed, 113 insertions(+), 5 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 9d2fa21201c1..1bbc46f2cfbf 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -935,6 +935,52 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
>  		return RESUME_HOST;
>
>  	switch (req) {
> +	case H_REMOVE:
> +		ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5),
> +					kvmppc_get_gpr(vcpu, 6));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_ENTER:
> +		ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5),
> +					kvmppc_get_gpr(vcpu, 6),
> +					kvmppc_get_gpr(vcpu, 7));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_READ:
> +		ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_CLEAR_MOD:
> +		ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_CLEAR_REF:
> +		ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_PROTECT:
> +		ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
> +					kvmppc_get_gpr(vcpu, 5),
> +					kvmppc_get_gpr(vcpu, 6));
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +	case H_BULK_REMOVE:
> +		ret = kvmppc_h_bulk_remove(vcpu);
> +		if (ret == H_TOO_HARD)
> +			return RESUME_HOST;
> +		break;
> +

Some of these symbols need to be exported (a rough sketch of the needed
exports is at the end of this mail):

ERROR: modpost: "kvmppc_h_bulk_remove" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_clear_mod" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_xive_xics_hcall" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_remove" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "decrementers_next_tb" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_hpte_hv_fault" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_protect" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_enter" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_clear_ref" [arch/powerpc/kvm/kvm-hv.ko] undefined!
ERROR: modpost: "kvmppc_h_read" [arch/powerpc/kvm/kvm-hv.ko] undefined!

> 	case H_CEDE:
> 		break;
> 	case H_PROD:
> @@ -1134,6 +1180,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
> 	default:
> 		return RESUME_HOST;
> 	}
> +	WARN_ON_ONCE(ret == H_TOO_HARD);
> 	kvmppc_set_gpr(vcpu, 3, ret);
> 	vcpu->arch.hcall_needed = 0;
> 	return RESUME_GUEST;
> @@ -1420,19 +1467,80 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
> 	 * host page has been paged out.  Any other HDSI/HISI interrupts
> 	 * have been handled already.
> 	 */
> -	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
> -		r = RESUME_PAGE_FAULT;
> -		if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
> +	case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
> +		unsigned long vsid;
> +		long err;
> +
> +		if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
>  			r = RESUME_GUEST; /* Just retry if it's the canary */
> +			break;
> +		}
> +
> +		if (kvm_is_radix(vcpu->kvm)) {
> +			r = RESUME_PAGE_FAULT;
> +			break;
> +		}
> +
> +		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
> +			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
> +			r = RESUME_GUEST;
> +			break;
> +		}
> +		if (!(vcpu->arch.shregs.msr & MSR_DR)) {
> +			vsid = vcpu->kvm->arch.vrma_slb_v;
> +		} else {
> +			vsid = vcpu->arch.fault_gpa;
> +		}
> +		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
> +				vsid, vcpu->arch.fault_dsisr, true);
> +		if (err == 0) {
> +			r = RESUME_GUEST;
> +		} else if (err == -1 || err == -2) {
> +			r = RESUME_PAGE_FAULT;
> +		} else {
> +			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dar, err);
> +			r = RESUME_GUEST;
> +		}
>  		break;
> -	case BOOK3S_INTERRUPT_H_INST_STORAGE:
> +	}
> +	case BOOK3S_INTERRUPT_H_INST_STORAGE: {
> +		unsigned long vsid;
> +		long err;
> +
>  		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
>  		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
>  			DSISR_SRR1_MATCH_64S;
>  		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
>  			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
> -		r = RESUME_PAGE_FAULT;
> +		if (kvm_is_radix(vcpu->kvm)) {
> +			r = RESUME_PAGE_FAULT;
> +			break;
> +		}
> +
> +		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
> +			/* XXX: clear DSISR_ISSTORE? */
> +			kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_dsisr);
> +			r = RESUME_GUEST;
> +			break;
> +		}
> +		if (!(vcpu->arch.shregs.msr & MSR_DR)) {
> +			vsid = vcpu->kvm->arch.vrma_slb_v;
> +		} else {
> +			vsid = vcpu->arch.fault_gpa;
> +		}
> +		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
> +				vsid, vcpu->arch.fault_dsisr, false);
> +		if (err == 0) {
> +			r = RESUME_GUEST;
> +		} else if (err == -1) {
> +			r = RESUME_PAGE_FAULT;
> +		} else {
> +			kvmppc_core_queue_inst_storage(vcpu, err);
> +			r = RESUME_GUEST;
> +		}
>  		break;
> +	}
> +
>  	/*
>  	 * This occurs if the guest executes an illegal instruction.
>  	 * If the guest debug is disabled, generate a program interrupt
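
On the modpost errors above: kvm-hv can be built as a module, and these
symbols all appear to live in code that stays built into the kernel image,
so the module now references unexported symbols. Below is a rough, untested
sketch of one way to resolve it. The file placements are my best guess at
where each symbol is defined (book3s_hv_rm_mmu.c for the HPT hcall handlers
and kvmppc_hpte_hv_fault, time.c for decrementers_next_tb, book3s_xive.c
for kvmppc_xive_xics_hcall); moving the relevant code into the module
proper would of course be the other option.

/*
 * Untested sketch: export the built-in symbols that kvm-hv.ko now uses.
 * Each export would sit next to the corresponding definition; the file
 * names below are assumptions, not verified.
 */

/* arch/powerpc/kvm/book3s_hv_rm_mmu.c (assumed location) */
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
EXPORT_SYMBOL_GPL(kvmppc_h_remove);
EXPORT_SYMBOL_GPL(kvmppc_h_read);
EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
EXPORT_SYMBOL_GPL(kvmppc_h_protect);
EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);

/* arch/powerpc/kernel/time.c -- per-cpu variable, so the per-cpu macro */
EXPORT_PER_CPU_SYMBOL_GPL(decrementers_next_tb);

/* arch/powerpc/kvm/book3s_xive.c (assumed location) */
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);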