ISAv3 radix modes provide SLBIA variants which can invalidate ERAT for
effPID!=0 or for effLPID!=0, which allows user and guest invalidations
to retain kernel/host ERAT entries.

Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
 arch/powerpc/include/asm/ppc-opcode.h | 8 ++++++++
 arch/powerpc/kvm/book3s_hv_builtin.c  | 6 ++++--
 arch/powerpc/mm/book3s64/radix_tlb.c  | 6 +++---
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 156102828a3b..0f67d672f719 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -588,7 +588,15 @@
 #define PPC_SLBIA(IH)	stringify_in_c(.long PPC_INST_SLBIA | \
 				       ((IH & 0x7) << 21))
+
+/*
+ * These may only be used by ARCH_300 (radix implies ARCH_300).
+ * USER/GUEST invalidates may only be used by radix mode (on HPT these
+ * would also invalidate various SLBEs which may not be desired).
+ */
 #define PPC_ARCH_300_INVALIDATE_ERAT	PPC_SLBIA(7)
+#define PPC_RADIX_INVALIDATE_ERAT_USER	PPC_SLBIA(3)
+#define PPC_RADIX_INVALIDATE_ERAT_GUEST	PPC_SLBIA(6)
 
 #define VCMPEQUD_RC(vrt, vra, vrb)	stringify_in_c(.long PPC_INST_VCMPEQUD | \
 			      ___PPC_RT(vrt) | ___PPC_RA(vra) | \
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 960154262665..08a32c3e6153 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -820,6 +820,8 @@ static void flush_guest_tlb(struct kvm *kvm)
 				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
 				       "r" (0) : "memory");
 		}
+		asm volatile("ptesync": : :"memory");
+		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
 	} else {
 		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
 			/* R=0 PRS=0 RIC=0 */
@@ -828,9 +830,9 @@ static void flush_guest_tlb(struct kvm *kvm)
 				       "r" (0) : "memory");
 			rb += PPC_BIT(51);	/* increment set number */
 		}
+		asm volatile("ptesync": : :"memory");
+		asm volatile(PPC_ARCH_300_INVALIDATE_ERAT : : :"memory");
 	}
-	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_ARCH_300_INVALIDATE_ERAT : : :"memory");
 }
 
 void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 17f33b6645f9..fb880a043898 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -258,7 +258,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);
 
 	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_ARCH_300_INVALIDATE_ERAT "; isync" : : :"memory");
+	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
 }
 
 static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
@@ -310,7 +310,7 @@ static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
 		__tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);
 
 	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_ARCH_300_INVALIDATE_ERAT "; isync" : : :"memory");
+	asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST "; isync" : : :"memory");
 }
 
 static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
@@ -362,7 +362,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
 		__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
 
 	asm volatile("ptesync": : :"memory");
-	asm volatile(PPC_ARCH_300_INVALIDATE_ERAT : : :"memory");
+	asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
 }
-- 
2.20.1