There are no t4-t7 register names under the n64 ABI, so switch the PBL
code over to the ta0-ta3 aliases, which are defined for both o32 and
n64. This allows compiling PBL code with the n64 ABI, which we use when
CONFIG_64BIT is set.

Signed-off-by: Denis Orlov <denorl2009@xxxxxxxxx>
---
 arch/mips/include/asm/pbl_macros.h | 28 ++++++++++++++--------------
 arch/mips/include/asm/pbl_nmon.h   | 10 +++++-----
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/arch/mips/include/asm/pbl_macros.h b/arch/mips/include/asm/pbl_macros.h
index c62910ff60..e60af38442 100644
--- a/arch/mips/include/asm/pbl_macros.h
+++ b/arch/mips/include/asm/pbl_macros.h
@@ -30,9 +30,9 @@
 	.set noreorder
 	li	t9, \addr
 	li	t8, \val
-	lw	t7, 0(t9)
-	or	t7, t8
-	sw	t7, 0(t9)
+	lw	ta3, 0(t9)
+	or	ta3, t8
+	sw	ta3, 0(t9)
 	.set pop
 	.endm
 
@@ -41,10 +41,10 @@
 	.set noreorder
 	li	t9, \addr
 	li	t8, \clr
-	lw	t7, 0(t9)
+	lw	ta3, 0(t9)
 	not	t8, t8
-	and	t7, t8
-	sw	t7, 0(t9)
+	and	ta3, t8
+	sw	ta3, 0(t9)
 	.set pop
 	.endm
 
@@ -123,15 +123,15 @@
 #define WSIZE	4
 copy_loop:
 	/* copy from source address [a0] */
-	lw	t4, WSIZE * 0(a0)
-	lw	t5, WSIZE * 1(a0)
-	lw	t6, WSIZE * 2(a0)
-	lw	t7, WSIZE * 3(a0)
+	lw	ta0, WSIZE * 0(a0)
+	lw	ta1, WSIZE * 1(a0)
+	lw	ta2, WSIZE * 2(a0)
+	lw	ta3, WSIZE * 3(a0)
 	/* copy to target address [a1] */
-	sw	t4, WSIZE * 0(a1)
-	sw	t5, WSIZE * 1(a1)
-	sw	t6, WSIZE * 2(a1)
-	sw	t7, WSIZE * 3(a1)
+	sw	ta0, WSIZE * 0(a1)
+	sw	ta1, WSIZE * 1(a1)
+	sw	ta2, WSIZE * 2(a1)
+	sw	ta3, WSIZE * 3(a1)
 	addi	a0, WSIZE * 4
 	subu	t3, a0, a2
 	blez	t3, copy_loop
diff --git a/arch/mips/include/asm/pbl_nmon.h b/arch/mips/include/asm/pbl_nmon.h
index 0e4ec39967..7c8ec9d204 100644
--- a/arch/mips/include/asm/pbl_nmon.h
+++ b/arch/mips/include/asm/pbl_nmon.h
@@ -39,12 +39,12 @@
 	.set push
 	.set reorder
 
-	move	t6, a0
-	li	t5, 32
+	move	ta2, a0
+	li	ta1, 32
 
 202:
-	addi	t5, t5, -4
-	srlv	a0, t6, t5
+	addi	ta1, ta1, -4
+	srlv	a0, ta2, ta1
 
 	/* output one hex digit */
 	andi	a0, a0, 15
@@ -57,7 +57,7 @@
 
 	debug_ll_outc_a0
 
-	bgtz	t5, 202b
+	bgtz	ta1, 202b
 
 	.set pop
 #endif /* CONFIG_DEBUG_LL */
-- 
2.41.0
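
For reference, the ta0-ta3 names come from asm/regdef.h. The sketch below
follows the Linux copy of that header (barebox's regdef.h may differ in
detail); it shows why the ta* aliases assemble under both ABIs while t4-t7
only exist under o32:

/* Sketch of the relevant asm/regdef.h defines (Linux-style; barebox's
 * header may differ in detail). Under o32, ta0-ta3 alias t4-t7
 * ($12-$15); under n32/n64 there are no t4-t7 names at all and
 * ta0-ta3 alias a4-a7 ($8-$11). Either way the ta* names assemble. */
#if _MIPS_SIM == _MIPS_SIM_ABI32		/* o32 */
#define t4	$12
#define ta0	$12
#define t5	$13
#define ta1	$13
#define t6	$14
#define ta2	$14
#define t7	$15
#define ta3	$15
#endif
#if _MIPS_SIM == _MIPS_SIM_NABI32 || _MIPS_SIM == _MIPS_SIM_ABI64
#define a4	$8
#define ta0	$8
#define a5	$9
#define ta1	$9
#define a6	$10
#define ta2	$10
#define a7	$11
#define ta3	$11
#endif

Note that under n32/n64 the ta* names map onto the extra argument registers
a4-a7, so they are only usable as scratch where those registers are not
live, which appears to be the case in the PBL helpers touched here.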