Now that the svcpu sync is interrupt aware we can enable interrupts
earlier in the exit code path again, moving 32-bit and 64-bit closer
together.

While at it, document the fact that we're always executing the exit
path with interrupts enabled, so that the next person doesn't trip
over this.

Signed-off-by: Alexander Graf <agraf@xxxxxxx>
---
 arch/powerpc/kvm/book3s_interrupts.S | 6 +-----
 arch/powerpc/kvm/book3s_rmhandlers.S | 6 +-----
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 2f7d571..a8cc2b2 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,6 +129,7 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
@@ -149,11 +150,6 @@ kvm_start_lightweight:
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9..c3c5231 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
 
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
-- 
1.8.1.4
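
For reference, the trampoline's MSR arithmetic boils down to two bit
operations: clear IR/DR so the rfi lands in real mode, and set EE so the
C exit handler always runs with interrupts enabled. Below is a minimal
userspace C sketch of just that logic, not kernel code: the MSR_EE/MSR_IR/
MSR_DR masks match the standard Linux powerpc definitions, but
kvm_host_msr and the example value are illustrative stand-ins for the
HSTATE_HOST_MSR contents, not actual kernel symbols.

#include <stdint.h>
#include <stdio.h>

/* Standard Linux powerpc MSR bit masks (bits 15, 5, 4). */
#define MSR_EE	0x8000ULL	/* external interrupt enable */
#define MSR_IR	0x0020ULL	/* instruction relocation */
#define MSR_DR	0x0010ULL	/* data relocation */

int main(void)
{
	/* Illustrative stand-in for the value held in HSTATE_HOST_MSR. */
	uint64_t kvm_host_msr = 0x9000000000001032ULL;

	/* andc r6, r5, r6: clear DR and IR so rfi enters real mode. */
	uint64_t realmode_msr = kvm_host_msr & ~(MSR_IR | MSR_DR);

	/*
	 * ori r5, r5, MSR_EE: now unconditional, so the C exit handler
	 * sees interrupts enabled on 32-bit and 64-bit alike.
	 */
	uint64_t exit_msr = kvm_host_msr | MSR_EE;

	printf("real-mode MSR: 0x%016llx\n",
	       (unsigned long long)realmode_msr);
	printf("exit-path MSR: 0x%016llx (EE=%d)\n",
	       (unsigned long long)exit_msr, (int)((exit_msr >> 15) & 1));
	return 0;
}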