Pass the hypervisor (H)SRR1[PREFIX] indication through to synchronous
interrupts injected into the guest.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 13 +++++++++----
 arch/powerpc/kvm/book3s_hv.c           | 27 ++++++++++++++++++---------
 arch/powerpc/kvm/book3s_hv_nested.c    |  9 ++++++---
 arch/powerpc/kvm/emulate_loadstore.c   |  6 +++---
 arch/powerpc/kvm/powerpc.c             |  3 ++-
 5 files changed, 38 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 215a6b5ba104..461307b89c3a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (dsisr & DSISR_BADACCESS) {
 		/* Reflect to the guest as DSI */
 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
 
@@ -979,7 +981,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 		 * Bad address in guest page table tree, or other
 		 * unusual error - reflect it to the guest as DSI.
 		 */
-		kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
 	return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +992,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 16ea0ffb7976..c973bf556fb3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1428,7 +1428,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
 		return RESUME_HOST;
 	} else {
-		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 		return RESUME_GUEST;
 	}
 }
@@ -1632,7 +1633,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * so that it knows that the machine check occurred.
 		 */
 		if (!vcpu->kvm->arch.fwnmi_enabled) {
-			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 			kvmppc_core_queue_machine_check(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1661,7 +1663,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * as a result of a hypervisor emulation interrupt
 		 * (e40) getting turned into a 700 by BML RTAS.
 		 */
-		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+		flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1741,7 +1744,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		}
 
 		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
-			kvmppc_core_queue_data_storage(vcpu, 0,
+			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			r = RESUME_GUEST;
 			break;
@@ -1759,7 +1763,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1 || err == -2) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_data_storage(vcpu, 0,
+			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, err);
 			r = RESUME_GUEST;
 		}
@@ -1787,7 +1792,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 
 		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
 			kvmppc_core_queue_inst_storage(vcpu,
-				vcpu->arch.fault_dsisr);
+				vcpu->arch.fault_dsisr |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 			break;
 		}
@@ -1804,7 +1810,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_inst_storage(vcpu, err);
+			kvmppc_core_queue_inst_storage(vcpu,
+				err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1825,7 +1832,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
 			r = kvmppc_emulate_debug_inst(vcpu);
 		} else {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1866,7 +1874,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 				r = kvmppc_tm_unavailable(vcpu);
 		}
 		if (r == EMULATE_FAIL) {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 2c9db6119d89..377d0b4a05ee 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
 			/* unusual error -> reflect to the guest as a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 
@@ -1570,8 +1572,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* Give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, 0, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 5666d69e202a..059c08ae0340 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu, 0);
+		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu, 0);
+		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu, 0);
+		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 9478bbd873c6..339267c33636 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -321,7 +321,8 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 			if (vcpu->mmio_is_write)
 				dsisr |= DSISR_ISSTORE;
 
-			kvmppc_core_queue_data_storage(vcpu, 0,
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 					vcpu->arch.vaddr_accessed, dsisr);
 		} else {
 			/*
-- 
2.37.2
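
For reference, a minimal standalone sketch (not part of the patch) of the
pattern applied at every injection site above: the PREFIX indication the
hardware left in (H)SRR1, and hence in the guest MSR image returned by
kvmppc_get_msr(), is OR'd into the SRR1/flags value of the interrupt queued
for the guest. The constant values below are assumed to match SRR1_PREFIXED
and SRR1_PROGILL in arch/powerpc/include/asm/reg.h, and guest_msr() /
queue_program_interrupt() are made-up stand-ins for kvmppc_get_msr() and
kvmppc_core_queue_program(), not real kernel API.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the definitions in arch/powerpc/include/asm/reg.h. */
#define SRR1_PREFIXED	0x20000000UL	/* exception caused by a prefixed instruction */
#define SRR1_PROGILL	0x00080000UL	/* illegal instruction (program check) */

/* Stand-in for kvmppc_get_msr(): the guest MSR image saved at interrupt exit. */
static uint64_t guest_msr(void)
{
	return SRR1_PREFIXED;	/* pretend the faulting instruction was prefixed */
}

/* Stand-in for kvmppc_core_queue_program(): just report what would be queued. */
static void queue_program_interrupt(uint64_t srr1_flags)
{
	printf("program interrupt queued, SRR1 flags = 0x%llx\n",
	       (unsigned long long)srr1_flags);
}

int main(void)
{
	/*
	 * Before the patch the call sites passed a bare flags value (0 or
	 * SRR1_PROGILL); after it, the PREFIX bit from the guest MSR image
	 * is carried through, so the guest sees SRR1[PREFIX] set when the
	 * faulting instruction was prefixed.
	 */
	queue_program_interrupt(SRR1_PROGILL | (guest_msr() & SRR1_PREFIXED));
	return 0;
}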