An instruction accessing an MMIO address generates an HDSI fault. This fault is appropriately handled by the hypervisor. However, in the case of secure VMs, the fault is delivered to the ultravisor. Unfortunately, the ultravisor has no correct way to fetch the faulting instruction. The PEF architecture does not allow the ultravisor to enable MMU translation. Walking the two-level page table to read the instruction can race with other vcpus modifying the SVM's process-scoped page table. This problem can be correctly solved with some help from the kernel: capture the faulting instruction in the SPRG0 register before executing the faulting instruction. This enables the ultravisor to easily procure the faulting instruction and emulate it. Signed-off-by: Ram Pai <linuxram@xxxxxxxxxx> --- arch/powerpc/include/asm/io.h | 85 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 75 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 635969b..7ef663d 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -35,6 +35,7 @@ #include <asm/mmu.h> #include <asm/ppc_asm.h> #include <asm/pgtable.h> +#include <asm/svm.h> #define SIO_CONFIG_RA 0x398 #define SIO_CONFIG_RD 0x399 @@ -105,34 +106,98 @@ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ - __asm__ __volatile__("sync;"#insn" %0,%y1;twi 0,%0,0;isync" \ - : "=r" (ret) : "Z" (*addr) : "memory"); \ + if (is_secure_guest()) { \ + __asm__ __volatile__("mfsprg0 %3;" \ + "lnia %2;" \ + "ld %2,12(%2);" \ + "mtsprg0 %2;" \ + "sync;" \ + #insn" %0,%y1;" \ + "twi 0,%0,0;" \ + "isync;" \ + "mtsprg0 %3" \ + : "=r" (ret) \ + : "Z" (*addr), "r" (0), "r" (0) \ + : "memory"); \ + } else { \ + __asm__ __volatile__("sync;" \ + #insn" %0,%y1;" \ + "twi 0,%0,0;" \ + "isync" \ + : "=r" (ret) : "Z" (*addr) : "memory"); \ + } \ return ret; \ } #define DEF_MMIO_OUT_X(name, size, insn) \ static inline void name(volatile u##size __iomem
*addr, u##size val) \ { \ - __asm__ __volatile__("sync;"#insn" %1,%y0" \ - : "=Z" (*addr) : "r" (val) : "memory"); \ - mmiowb_set_pending(); \ + if (is_secure_guest()) { \ + __asm__ __volatile__("mfsprg0 %3;" \ + "lnia %2;" \ + "ld %2,12(%2);" \ + "mtsprg0 %2;" \ + "sync;" \ + #insn" %1,%y0;" \ + "mtsprg0 %3" \ + : "=Z" (*addr) \ + : "r" (val), "r" (0), "r" (0) \ + : "memory"); \ + } else { \ + __asm__ __volatile__("sync;" \ + #insn" %1,%y0" \ + : "=Z" (*addr) : "r" (val) : "memory"); \ + mmiowb_set_pending(); \ + } \ } #define DEF_MMIO_IN_D(name, size, insn) \ static inline u##size name(const volatile u##size __iomem *addr) \ { \ u##size ret; \ - __asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\ - : "=r" (ret) : "m" (*addr) : "memory"); \ + if (is_secure_guest()) { \ + __asm__ __volatile__("mfsprg0 %3;" \ + "lnia %2;" \ + "ld %2,12(%2);" \ + "mtsprg0 %2;" \ + "sync;" \ + #insn"%U1%X1 %0,%1;" \ + "twi 0,%0,0;" \ + "isync;" \ + "mtsprg0 %3" \ + : "=r" (ret) \ + : "m" (*addr), "r" (0), "r" (0) \ + : "memory"); \ + } else { \ + __asm__ __volatile__("sync;" \ + #insn"%U1%X1 %0,%1;" \ + "twi 0,%0,0;" \ + "isync" \ + : "=r" (ret) : "m" (*addr) : "memory"); \ + } \ return ret; \ } #define DEF_MMIO_OUT_D(name, size, insn) \ static inline void name(volatile u##size __iomem *addr, u##size val) \ { \ - __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \ - : "=m" (*addr) : "r" (val) : "memory"); \ - mmiowb_set_pending(); \ + if (is_secure_guest()) { \ + __asm__ __volatile__("mfsprg0 %3;" \ + "lnia %2;" \ + "ld %2,12(%2);" \ + "mtsprg0 %2;" \ + "sync;" \ + #insn"%U0%X0 %1,%0;" \ + "mtsprg0 %3" \ + : "=m" (*addr) \ + : "r" (val), "r" (0), "r" (0) \ + : "memory"); \ + } else { \ + __asm__ __volatile__("sync;" \ + #insn"%U0%X0 %1,%0" \ + : "=m" (*addr) : "r" (val) : "memory"); \ + mmiowb_set_pending(); \ + } \ } DEF_MMIO_IN_D(in_8, 8, lbz); -- 1.8.3.1