On 25.09.24 15:55, Sascha Hauer wrote:
> Adoption of Linux commit:
>
> | commit 6ebbf2ce437b33022d30badd49dc94d33ecfa498
> | Author: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>
> | Date: Mon Jun 30 16:29:12 2014 +0100
> |
> | ARM: convert all "mov.* pc, reg" to "bx reg" for ARMv6+
> |
> | ARMv6 and greater introduced a new instruction ("bx") which can be used
> | to return from function calls. Recent CPUs perform better when the
> | "bx lr" instruction is used rather than the "mov pc, lr" instruction,
> | and this sequence is strongly recommended to be used by the ARM
> | architecture manual (section A.4.1.1).
> |
> | We provide a new macro "ret" with all its variants for the condition
> | code which will resolve to the appropriate instruction.
> |
> | Rather than doing this piecemeal, and miss some instances, change all
> | the "mov pc" instances to use the new macro, with the exception of
> | the "movs" instruction and the kprobes code. This allows us to detect
> | the "mov pc, lr" case and fix it up - and also gives us the possibility
> | of deploying this for other registers depending on the CPU selection.
> |
> | Reported-by: Will Deacon <will.deacon@xxxxxxx>
> | Tested-by: Stephen Warren <swarren@xxxxxxxxxx> # Tegra Jetson TK1
> | Tested-by: Robert Jarzmik <robert.jarzmik@xxxxxxx> # mioa701_bootresume.S
> | Tested-by: Andrew Lunn <andrew@xxxxxxx> # Kirkwood
> | Tested-by: Shawn Guo <shawn.guo@xxxxxxxxxxxxx>
> | Tested-by: Tony Lindgren <tony@xxxxxxxxxxx> # OMAPs
> | Tested-by: Gregory CLEMENT <gregory.clement@xxxxxxxxxxxxxxxxxx> # Armada XP, 375, 385
> | Acked-by: Sekhar Nori <nsekhar@xxxxxx> # DaVinci
> | Acked-by: Christoffer Dall <christoffer.dall@xxxxxxxxxx> # kvm/hyp
> | Acked-by: Haojian Zhuang <haojian.zhuang@xxxxxxxxx> # PXA3xx
> | Acked-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx> # Xen
> | Tested-by: Uwe Kleine-König <u.kleine-koenig@xxxxxxxxxxxxxx> # ARMv7M
> | Tested-by: Simon Horman <horms+renesas@xxxxxxxxxxxx> # Shmobile
> | Signed-off-by: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>
>
> Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>

Reviewed-by: Ahmad Fatoum <a.fatoum@xxxxxxxxxxxxxx>

> ---
>  arch/arm/cpu/cache-armv4.S       | 11 ++++++-----
>  arch/arm/cpu/cache-armv5.S       | 13 +++++++------
>  arch/arm/cpu/cache-armv6.S       | 13 +++++++------
>  arch/arm/cpu/cache-armv7.S       |  9 +++++----
>  arch/arm/cpu/hyp.S               |  3 ++-
>  arch/arm/cpu/setupc_32.S         |  7 ++++---
>  arch/arm/cpu/sm_as.S             |  3 ++-
>  arch/arm/include/asm/assembler.h | 22 ++++++++++++++++++++++
>  arch/arm/lib32/ashldi3.S         |  3 ++-
>  arch/arm/lib32/ashrdi3.S         |  3 ++-
>  arch/arm/lib32/lshrdi3.S         |  3 ++-
>  arch/arm/lib32/runtime-offset.S  |  2 +-
>  12 files changed, 62 insertions(+), 30 deletions(-)
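
Nice mechanical cleanup. For anyone skimming the diff below: the only
change visible at the call sites is "mov pc, <reg>" becoming "ret <reg>".
As an illustration (my sketch, not part of the patch), a typical epilogue

	mov	pc, lr		@ plain branch; not recognized as a
				@ function return by the predictor

now assembles on ARMv6 or later builds as

	bx	lr		@ tracked by the return stack predictor

while pre-v6 builds keep emitting the mov. The macro itself is quoted
further down in asm/assembler.h.
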
> diff --git a/arch/arm/cpu/cache-armv4.S b/arch/arm/cpu/cache-armv4.S
> index 78a098b2fe..024a94c583 100644
> --- a/arch/arm/cpu/cache-armv4.S
> +++ b/arch/arm/cpu/cache-armv4.S
> @@ -2,6 +2,7 @@
>  
>  #include <linux/linkage.h>
>  #include <init.h>
> +#include <asm/assembler.h>
>  
>  #define CACHE_DLINESIZE 32
>  
> @@ -22,7 +23,7 @@ ENTRY(v4_mmu_cache_on)
>  	mov r0, #0
>  	mcr p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
>  #endif
> -	mov pc, r12
> +	ret r12
>  ENDPROC(v4_mmu_cache_on)
>  
>  __common_mmu_cache_on:
> @@ -43,7 +44,7 @@ ENTRY(v4_mmu_cache_off)
>  	mcr p15, 0, r0, c7, c7		@ invalidate whole cache v4
>  	mcr p15, 0, r0, c8, c7		@ invalidate whole TLB v4
>  #endif
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v4_mmu_cache_off)
>  
>  .section .text.v4_mmu_cache_flush
> @@ -105,7 +106,7 @@ ENTRY(v4_dma_inv_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
>  /*
>   * dma_clean_range(start, end)
> @@ -125,7 +126,7 @@ ENTRY(v4_dma_clean_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
>  /*
>   * dma_flush_range(start, end)
> @@ -143,5 +144,5 @@ ENTRY(v4_dma_flush_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
> diff --git a/arch/arm/cpu/cache-armv5.S b/arch/arm/cpu/cache-armv5.S
> index bcb7ebf466..6d9cbba015 100644
> --- a/arch/arm/cpu/cache-armv5.S
> +++ b/arch/arm/cpu/cache-armv5.S
> @@ -2,6 +2,7 @@
>  
>  #include <linux/linkage.h>
>  #include <init.h>
> +#include <asm/assembler.h>
>  
>  #define CACHE_DLINESIZE 32
>  
> @@ -22,7 +23,7 @@ ENTRY(v5_mmu_cache_on)
>  	mov r0, #0
>  	mcr p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
>  #endif
> -	mov pc, r12
> +	ret r12
>  ENDPROC(v5_mmu_cache_on)
>  
>  __common_mmu_cache_on:
> @@ -43,7 +44,7 @@ ENTRY(v5_mmu_cache_off)
>  	mcr p15, 0, r0, c7, c7		@ invalidate whole cache v4
>  	mcr p15, 0, r0, c8, c7		@ invalidate whole TLB v4
>  #endif
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v5_mmu_cache_off)
>  
>  .section .text.v5_mmu_cache_flush
> @@ -52,7 +53,7 @@ ENTRY(v5_mmu_cache_flush)
>  	bne 1b
>  	mcr p15, 0, r0, c7, c5, 0	@ flush I cache
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v5_mmu_cache_flush)
>  
>  /*
> @@ -80,7 +81,7 @@ ENTRY(v5_dma_inv_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
>  /*
>   * dma_clean_range(start, end)
> @@ -100,7 +101,7 @@ ENTRY(v5_dma_clean_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
>  /*
>   * dma_flush_range(start, end)
> @@ -118,5 +119,5 @@ ENTRY(v5_dma_flush_range)
>  	cmp r0, r1
>  	blo 1b
>  	mcr p15, 0, r0, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  
> diff --git a/arch/arm/cpu/cache-armv6.S b/arch/arm/cpu/cache-armv6.S
> index cc720314c0..ab965623a3 100644
> --- a/arch/arm/cpu/cache-armv6.S
> +++ b/arch/arm/cpu/cache-armv6.S
> @@ -2,6 +2,7 @@
>  
>  #include <linux/linkage.h>
>  #include <init.h>
> +#include <asm/assembler.h>
>  
>  #define HARVARD_CACHE
>  #define CACHE_LINE_SIZE 32
> @@ -24,7 +25,7 @@ ENTRY(v6_mmu_cache_on)
>  	mov r0, #0
>  	mcr p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
>  #endif
> -	mov pc, r12
> +	ret r12
>  ENDPROC(v6_mmu_cache_on)
>  
>  __common_mmu_cache_on:
> @@ -46,7 +47,7 @@ ENTRY(v6_mmu_cache_off)
>  	mcr p15, 0, r0, c7, c7		@ invalidate whole cache v4
>  	mcr p15, 0, r0, c8, c7		@ invalidate whole TLB v4
>  #endif
> -	mov pc, lr
> +	ret lr
>  
>  .section .text.v6_mmu_cache_flush
>  ENTRY(v6_mmu_cache_flush)
> @@ -55,7 +56,7 @@ ENTRY(v6_mmu_cache_flush)
>  	mcr p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
>  	mcr p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
>  	mcr p15, 0, r1, c7, c10, 4	@ drain WB
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v6_mmu_cache_flush)
>  
>  /*
> @@ -95,7 +96,7 @@ ENTRY(v6_dma_inv_range)
>  	blo 1b
>  	mov r0, #0
>  	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v6_dma_inv_range)
>  
>  /*
> @@ -117,7 +118,7 @@ ENTRY(v6_dma_clean_range)
>  	blo 1b
>  	mov r0, #0
>  	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v6_dma_clean_range)
>  
>  /*
> @@ -139,5 +140,5 @@ ENTRY(v6_dma_flush_range)
>  	blo 1b
>  	mov r0, #0
>  	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v6_dma_flush_range)
> diff --git a/arch/arm/cpu/cache-armv7.S b/arch/arm/cpu/cache-armv7.S
> index efd9fe412f..3f6e5e6b73 100644
> --- a/arch/arm/cpu/cache-armv7.S
> +++ b/arch/arm/cpu/cache-armv7.S
> @@ -2,6 +2,7 @@
>  
>  #include <linux/linkage.h>
>  #include <init.h>
> +#include <asm/assembler.h>
>  
>  .section .text.v7_mmu_cache_on
>  ENTRY(v7_mmu_cache_on)
> @@ -140,7 +141,7 @@ iflush:
>  	mcr p15, 0, r12, c7, c5, 0	@ invalidate I+BTB
>  	dsb
>  	isb
> -	mov pc, lr
> +	ret lr
>  ENDPROC(__v7_mmu_cache_flush_invalidate)
>  
>  /*
> @@ -182,7 +183,7 @@ ENTRY(v7_dma_inv_range)
>  	cmp r0, r1
>  	blo 1b
>  	dsb
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v7_dma_inv_range)
>  
>  /*
> @@ -201,7 +202,7 @@ ENTRY(v7_dma_clean_range)
>  	cmp r0, r1
>  	blo 1b
>  	dsb
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v7_dma_clean_range)
>  
>  /*
> @@ -220,5 +221,5 @@ ENTRY(v7_dma_flush_range)
>  	cmp r0, r1
>  	blo 1b
>  	dsb
> -	mov pc, lr
> +	ret lr
>  ENDPROC(v7_dma_flush_range)
> diff --git a/arch/arm/cpu/hyp.S b/arch/arm/cpu/hyp.S
> index b5e4807877..016bcd79c0 100644
> --- a/arch/arm/cpu/hyp.S
> +++ b/arch/arm/cpu/hyp.S
> @@ -4,6 +4,7 @@
>  #include <asm/system.h>
>  #include <asm/opcodes-virt.h>
>  #include <init.h>
> +#include <asm/assembler.h>
>  
>  .arch_extension sec
>  .arch_extension virt
> @@ -80,7 +81,7 @@ THUMB(	orr	r12, r12, #PSR_T_BIT	)
>  	__ERET
>  1:	msr cpsr_c, r12
>  2:
> -	mov pc, r2
> +	ret r2
>  ENDPROC(armv7_hyp_install)
>  
>  ENTRY(armv7_switch_to_hyp)
> diff --git a/arch/arm/cpu/setupc_32.S b/arch/arm/cpu/setupc_32.S
> index eafc9b52c6..d3449d9646 100644
> --- a/arch/arm/cpu/setupc_32.S
> +++ b/arch/arm/cpu/setupc_32.S
> @@ -2,6 +2,7 @@
>  
>  #include <linux/linkage.h>
>  #include <asm/sections.h>
> +#include <asm/assembler.h>
>  
>  .section .text.setupc
>  
> @@ -32,7 +33,7 @@ ENTRY(setup_c)
>  	bl sync_caches_for_execution
>  	sub lr, r5, r4			/* adjust return address to new location */
>  	pop {r4, r5}
> -	mov pc, lr
> +	ret lr
>  ENDPROC(setup_c)
>  
>  /*
> @@ -76,13 +77,13 @@ ENTRY(relocate_to_adr)
>  	ldr r0,=1f
>  	sub r0, r0, r8
>  	add r0, r0, r6
> -	mov pc, r0			/* jump to relocated address */
> +	ret r0				/* jump to relocated address */
>  1:
>  	bl relocate_to_current_adr	/* relocate binary */
>  
>  	mov lr, r7
>  
>  	pop {r3, r4, r5, r6, r7, r8}
> -	mov pc, lr
> +	ret lr
>  
>  ENDPROC(relocate_to_adr)
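
One observation, no action needed: the "ret r0" above (and "ret r2" in
hyp.S, "ret r12" in the cache_on functions) is a computed jump rather
than a return through lr. With the macro as defined below, any operand
other than the literal "lr" falls back to "mov\c pc, \reg" even on v6+,
so if I read it right these sites assemble to exactly the same
instruction as before.
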
> diff --git a/arch/arm/cpu/sm_as.S b/arch/arm/cpu/sm_as.S
> index f55ac8661c..32007147d4 100644
> --- a/arch/arm/cpu/sm_as.S
> +++ b/arch/arm/cpu/sm_as.S
> @@ -5,6 +5,7 @@
>  #include <asm-generic/memory_layout.h>
>  #include <asm/secure.h>
>  #include <asm/system.h>
> +#include <asm/assembler.h>
>  
>  .arch_extension sec
>  .arch_extension virt
> @@ -147,7 +148,7 @@ secure_monitor:
>  
>  hyp_trap:
>  	mrs lr, elr_hyp		@ for older asm: .byte 0x00, 0xe3, 0x0e, 0xe1
> -	mov pc, lr		@ do no switch modes, but
> +	ret lr			@ do no switch modes, but
>  				@ return to caller
>  
>  ENTRY(psci_cpu_entry)
> diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
> index 4e7ad57170..e8f5625a0a 100644
> --- a/arch/arm/include/asm/assembler.h
> +++ b/arch/arm/include/asm/assembler.h
> @@ -340,4 +340,26 @@
>  	blx\c	\dst
>  	.endif
>  	.endm
> +
> +	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
> +	.macro	ret\c, reg
> +#if __LINUX_ARM_ARCH__ < 6
> +	mov\c	pc, \reg
> +#else
> +	.ifeqs	"\reg", "lr"
> +	bx\c	\reg
> +	.else
> +	mov\c	pc, \reg
> +	.endif
> +#endif
> +	.endm
> +	.endr
> +
> +	.macro	ret.w, reg
> +	ret	\reg
> +#ifdef CONFIG_THUMB2_BAREBOX
> +	nop
> +#endif
> +	.endm
> +
>  #endif
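
The macro block reads like a faithful port of the kernel version, with
CONFIG_THUMB2_KERNEL swapped for CONFIG_THUMB2_BAREBOX. To spell out
what it generates, here is my understanding (a sketch, assuming an
ARMv7 ARM-state build):

	ret	lr		@ expands to: bx lr
	retne	lr		@ expands to: bxne lr
	ret	r12		@ stays: mov pc, r12

On __LINUX_ARM_ARCH__ < 6 every variant stays "mov\c pc, \reg". The
extra nop in ret.w under CONFIG_THUMB2_BAREBOX presumably keeps the
code size constant in Thumb-2, where "bx lr" is a narrow 16-bit
encoding while the "mov.w" it replaces was 32 bits wide.
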
> diff --git a/arch/arm/lib32/ashldi3.S b/arch/arm/lib32/ashldi3.S
> index b62e06f602..dccb732078 100644
> --- a/arch/arm/lib32/ashldi3.S
> +++ b/arch/arm/lib32/ashldi3.S
> @@ -23,6 +23,7 @@ General Public License for more details.
>  */
>  
>  #include <linux/linkage.h>
> +#include <asm/assembler.h>
>  
>  #ifdef __ARMEB__
>  #define al r1
> @@ -44,7 +45,7 @@ ENTRY(__aeabi_llsl)
>  THUMB(	lsrmi	r3, al, ip	)
>  THUMB(	orrmi	ah, ah, r3	)
>  	mov al, al, lsl r2
> -	mov pc, lr
> +	ret lr
>  
>  ENDPROC(__ashldi3)
>  ENDPROC(__aeabi_llsl)
> diff --git a/arch/arm/lib32/ashrdi3.S b/arch/arm/lib32/ashrdi3.S
> index db849b65fc..3db06281e5 100644
> --- a/arch/arm/lib32/ashrdi3.S
> +++ b/arch/arm/lib32/ashrdi3.S
> @@ -23,6 +23,7 @@ General Public License for more details.
>  */
>  
>  #include <linux/linkage.h>
> +#include <asm/assembler.h>
>  
>  #ifdef __ARMEB__
>  #define al r1
> @@ -44,7 +45,7 @@ ENTRY(__aeabi_lasr)
>  THUMB(	lslmi	r3, ah, ip	)
>  THUMB(	orrmi	al, al, r3	)
>  	mov ah, ah, asr r2
> -	mov pc, lr
> +	ret lr
>  
>  ENDPROC(__ashrdi3)
>  ENDPROC(__aeabi_lasr)
> diff --git a/arch/arm/lib32/lshrdi3.S b/arch/arm/lib32/lshrdi3.S
> index e77e96c7bc..5af522482c 100644
> --- a/arch/arm/lib32/lshrdi3.S
> +++ b/arch/arm/lib32/lshrdi3.S
> @@ -23,6 +23,7 @@ General Public License for more details.
>  */
>  
>  #include <linux/linkage.h>
> +#include <asm/assembler.h>
>  
>  #ifdef __ARMEB__
>  #define al r1
> @@ -44,7 +45,7 @@ ENTRY(__aeabi_llsr)
>  THUMB(	lslmi	r3, ah, ip	)
>  THUMB(	orrmi	al, al, r3	)
>  	mov ah, ah, lsr r2
> -	mov pc, lr
> +	ret lr
>  
>  ENDPROC(__lshrdi3)
>  ENDPROC(__aeabi_llsr)
> diff --git a/arch/arm/lib32/runtime-offset.S b/arch/arm/lib32/runtime-offset.S
> index ac104de119..d9ba864b3b 100644
> --- a/arch/arm/lib32/runtime-offset.S
> +++ b/arch/arm/lib32/runtime-offset.S
> @@ -14,7 +14,7 @@ ENTRY(get_runtime_offset)
>  	ldr r1, linkadr
>  	subs r0, r0, r1
>  THUMB(	adds r0, r0, #1)
> -	mov pc, lr
> +	ret lr
>  
>  linkadr:
>  .word get_runtime_offset
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |