The optimized versions exist for ARM32, but since 0d53f3c584a2
("arm: use asm-generic/io.h") they have gone unused. Activate them
again.

Reviewed-by: Ahmad Fatoum <a.fatoum@xxxxxxxxxxxxxx>
Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>
---
 arch/arm/include/asm/io.h        | 24 ++++++++++++++++++++++++
 arch/arm/lib32/io-readsb.S       |  6 +++---
 arch/arm/lib32/io-readsl.S       |  6 +++---
 arch/arm/lib32/io-readsw-armv4.S |  6 +++---
 arch/arm/lib32/io-writesb.S      |  6 +++---
 arch/arm/lib32/io-writesl.S      |  6 +++---
 6 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 486b142950..9e9b13ad18 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -3,6 +3,30 @@
 #ifndef __ASM_ARM_IO_H
 #define __ASM_ARM_IO_H
 
+#include <linux/compiler.h>
+
+#ifndef CONFIG_CPU_64
+/*
+ * Generic IO read/write. These perform native-endian accesses. Note
+ * that some architectures will want to re-define __raw_{read,write}w.
+ */
+void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
+void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
+void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
+
+void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
+void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
+void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
+
+#define readsb(p,d,l)		__raw_readsb(p,d,l)
+#define readsw(p,d,l)		__raw_readsw(p,d,l)
+#define readsl(p,d,l)		__raw_readsl(p,d,l)
+
+#define writesb(p,d,l)		__raw_writesb(p,d,l)
+#define writesw(p,d,l)		__raw_writesw(p,d,l)
+#define writesl(p,d,l)		__raw_writesl(p,d,l)
+#endif
+
 #define IO_SPACE_LIMIT 0
 
 #define memcpy_fromio memcpy_fromio
diff --git a/arch/arm/lib32/io-readsb.S b/arch/arm/lib32/io-readsb.S
index f853c48021..41f68092c5 100644
--- a/arch/arm/lib32/io-readsb.S
+++ b/arch/arm/lib32/io-readsb.S
@@ -7,7 +7,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-.section .text.readsb
+.section .text.__raw_readsb
 
 .Linsb_align:	rsb	ip, ip, #4
 		cmp	ip, r2
@@ -22,7 +22,7 @@
 		subs	r2, r2, ip
 		bne	.Linsb_aligned
 
-ENTRY(readsb)
+ENTRY(__raw_readsb)
 		teq	r2, #0		@ do we have to check for the zero len?
 		moveq	pc, lr
 		ands	ip, r1, #3
@@ -119,4 +119,4 @@ ENTRY(readsb)
 		strgtb	r3, [r1]
 		ldmfd	sp!, {r4 - r6, pc}
 
-ENDPROC(readsb)
+ENDPROC(__raw_readsb)
diff --git a/arch/arm/lib32/io-readsl.S b/arch/arm/lib32/io-readsl.S
index bb8b96ded0..e1855fd636 100644
--- a/arch/arm/lib32/io-readsl.S
+++ b/arch/arm/lib32/io-readsl.S
@@ -7,9 +7,9 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-.section .text.readsl
+.section .text.__raw_readsl
 
-ENTRY(readsl)
+ENTRY(__raw_readsl)
 		teq	r2, #0		@ do we have to check for the zero len?
 		moveq	pc, lr
 		ands	ip, r1, #3
@@ -75,4 +75,4 @@ ENTRY(readsl)
 8:		mov	r3, ip, get_byte_0
 		strb	r3, [r1, #0]
 		mov	pc, lr
-ENDPROC(readsl)
+ENDPROC(__raw_readsl)
diff --git a/arch/arm/lib32/io-readsw-armv4.S b/arch/arm/lib32/io-readsw-armv4.S
index 25f2778860..9fb7fd7576 100644
--- a/arch/arm/lib32/io-readsw-armv4.S
+++ b/arch/arm/lib32/io-readsw-armv4.S
@@ -15,7 +15,7 @@
 #endif
 		.endm
 
-.section .text.readsw
+.section .text.__raw_readsw
 
 .Linsw_align:	movs	ip, r1, lsl #31
 		bne	.Linsw_noalign
@@ -23,7 +23,7 @@
 		sub	r2, r2, #1
 		strh	ip, [r1], #2
 
-ENTRY(readsw)
+ENTRY(__raw_readsw)
 		teq	r2, #0
 		moveq	pc, lr
 		tst	r1, #3
@@ -127,4 +127,4 @@ ENTRY(readsw)
 		_BE_ONLY_(	movne	ip, ip, lsr #24	)
 		strneb	ip, [r1]
 		ldmfd	sp!, {r4, pc}
-ENDPROC(readsw)
+ENDPROC(__raw_readsw)
diff --git a/arch/arm/lib32/io-writesb.S b/arch/arm/lib32/io-writesb.S
index 313839bff6..b6ce85f0d4 100644
--- a/arch/arm/lib32/io-writesb.S
+++ b/arch/arm/lib32/io-writesb.S
@@ -27,7 +27,7 @@
 #endif
 		.endm
 
-.section .text.writesb
+.section .text.__raw_writesb
 
 .Loutsb_align:	rsb	ip, ip, #4
 		cmp	ip, r2
@@ -42,7 +42,7 @@
 		subs	r2, r2, ip
 		bne	.Loutsb_aligned
 
-ENTRY(writesb)
+ENTRY(__raw_writesb)
 		teq	r2, #0		@ do we have to check for the zero len?
 		moveq	pc, lr
 		ands	ip, r1, #3
@@ -90,4 +90,4 @@ ENTRY(writesb)
 		strgtb	r3, [r0]
 		ldmfd	sp!, {r4, r5, pc}
 
-ENDPROC(writesb)
+ENDPROC(__raw_writesb)
diff --git a/arch/arm/lib32/io-writesl.S b/arch/arm/lib32/io-writesl.S
index d9a29d9153..ed91ae19b7 100644
--- a/arch/arm/lib32/io-writesl.S
+++ b/arch/arm/lib32/io-writesl.S
@@ -7,9 +7,9 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-.section .text.writesl
+.section .text.__raw_writesl
 
-ENTRY(writesl)
+ENTRY(__raw_writesl)
 		teq	r2, #0		@ do we have to check for the zero len?
 		moveq	pc, lr
 		ands	ip, r1, #3
@@ -63,4 +63,4 @@ ENTRY(writesl)
 		str	ip, [r0]
 		bne	6b
 		mov	pc, lr
-ENDPROC(writesl)
+ENDPROC(__raw_writesl)
-- 
2.39.5
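
[Editor's note, not part of the patch: the reads*/writes* accessors
repeatedly access the *same* device address while incrementing the
memory pointer, which is the access pattern of a data-port FIFO. The
following minimal sketch shows how a driver might use readsl() once
this patch is applied. The fifo_drain() helper and the FIFO_DATA_REG
offset are hypothetical, and it assumes barebox's <io.h> pulls in the
needed __iomem/u32 definitions.]

	#include <io.h>

	#define FIFO_DATA_REG	0x00	/* hypothetical data-port offset */

	static void fifo_drain(void __iomem *base, u32 *buf, int words)
	{
		/*
		 * Perform 'words' 32-bit reads from the same device
		 * address, storing the results to the incrementing
		 * buffer. With this patch, readsl() expands to the
		 * optimized assembly __raw_readsl() on ARM32 instead
		 * of a generic C loop.
		 */
		readsl(base + FIFO_DATA_REG, buf, words);
	}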