The LDCW barriers in the tlb_lock and tlb_unlock0 macros can be removed.
In tlb_lock, the barrier isn't needed because there are no prior accesses
to be concerned about. In tlb_unlock0, we replace the LDCW barrier with an
ordered store, which ensures that all prior accesses are performed before
the store that releases the lock.

Signed-off-by: Dave Anglin <dave.anglin@xxxxxxxx>
---
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4b484ec7c7da..60b966b29d6c 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -454,7 +454,6 @@
 	nop
 	LDREG	0(\ptp),\pte
 	bb,<,n	\pte,_PAGE_PRESENT_BIT,3f
-	LDCW	0(\tmp),\tmp1
 	b	\fault
 	stw	\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
@@ -464,13 +463,16 @@
 3:
 	.endm
 
-	/* Release pa_tlb_lock lock without reloading lock address. */
+	/* Release pa_tlb_lock lock without reloading lock address.
+	   Note that the values in the register spc are limited to
+	   NR_SPACE_IDS (262144). Thus, the stw instruction always
+	   stores a nonzero value even when register spc is 64 bits.
+	   We use an ordered store to ensure all prior accesses are
+	   performed prior to releasing the lock. */
 	.macro		tlb_unlock0	spc,tmp,tmp1
 #ifdef CONFIG_SMP
 98:	or,COND(=)	%r0,\spc,%r0
-	LDCW	0(\tmp),\tmp1
-	or,COND(=)	%r0,\spc,%r0
-	stw	\spc,0(\tmp)
+	stw,ma	\spc,0(\tmp)
 99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
 	.endm
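
For readers less familiar with PA-RISC ordering, a rough C-level sketch of
the release-ordering argument, written against the kernel's generic barrier
primitives, might look like the following. The lock word and helper names
here are hypothetical and exist only to illustrate the idea; the real lock
is the pa_tlb_lock word manipulated in assembly above.

#include <linux/compiler.h>
#include <asm/barrier.h>

/* Hypothetical stand-in for the pa_tlb_lock word: zero means "held"
 * (LDCW clears it on acquisition), nonzero means "free". */
static unsigned int tlb_lock_word;

/* Old release sequence: a dummy LDCW served only as a memory barrier,
 * then a plain store of the (nonzero) space id freed the lock. */
static inline void tlb_unlock_old(unsigned int spc)
{
	mb();				/* stands in for the LDCW barrier */
	WRITE_ONCE(tlb_lock_word, spc);	/* plain store, lock now free */
}

/* New release sequence: one store with release ordering guarantees that
 * all prior loads and stores are visible before the lock word becomes
 * nonzero, roughly what the ordered store (stw,ma with a zero
 * displacement, i.e. stw,o) in the patch provides. */
static inline void tlb_unlock_new(unsigned int spc)
{
	smp_store_release(&tlb_lock_word, spc);
}

The point of the change is that the ordering is folded into the releasing
store itself, so the separate barrier (and the extra lock-word access it
implied) can be dropped.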