From: Helge Deller <deller@xxxxxx>

No need to extract the upper and lower 32-bit values of the 64-bit
value. Use gcc's %R1 to access the lower 32 bits and %1 to access the
upper 32 bits instead.

Signed-off-by: Helge Deller <deller@xxxxxx>
---
 arch/parisc/kernel/unaligned.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)

diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 170d0dda4213..ce25acfe4889 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -338,25 +338,24 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 	: "r19", "r20", "r21", "r22", "r1" );
 #else
     {
-	unsigned long valh = (val >> 32), vall = (val & 0xffffffffl);
 	__asm__ __volatile__ (
-"	mtsp	%4, %%sr1\n"
-"	zdep	%2, 29, 2, %%r19\n"
-"	dep	%%r0, 31, 2, %3\n"
+"	mtsp	%3, %%sr1\n"
+"	zdep	%R1, 29, 2, %%r19\n"
+"	dep	%%r0, 31, 2, %2\n"
 "	mtsar	%%r19\n"
 "	zvdepi	-2, 32, %%r19\n"
-"1:	ldw	0(%%sr1,%3),%%r20\n"
-"2:	ldw	8(%%sr1,%3),%%r21\n"
-"	vshd	%1, %2, %%r1\n"
+"1:	ldw	0(%%sr1,%2),%%r20\n"
+"2:	ldw	8(%%sr1,%2),%%r21\n"
+"	vshd	%1, %R1, %%r1\n"
 "	vshd	%%r0, %1, %1\n"
-"	vshd	%2, %%r0, %2\n"
+"	vshd	%R1, %%r0, %R1\n"
 "	and	%%r20, %%r19, %%r20\n"
 "	andcm	%%r21, %%r19, %%r21\n"
 "	or	%1, %%r20, %1\n"
-"	or	%2, %%r21, %2\n"
-"3:	stw	%1,0(%%sr1,%3)\n"
-"4:	stw	%%r1,4(%%sr1,%3)\n"
-"5:	stw	%2,8(%%sr1,%3)\n"
+"	or	%R1, %%r21, %R1\n"
+"3:	stw	%1,0(%%sr1,%2)\n"
+"4:	stw	%%r1,4(%%sr1,%2)\n"
+"5:	stw	%R1,8(%%sr1,%2)\n"
 "6:	\n"
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b)
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b)
@@ -364,7 +363,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b)
 	ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b)
 	: "+r" (ret)
-	: "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+	: "r" (val), "r" (regs->ior), "r" (regs->isr)
 	: "r19", "r20", "r21", "r1" );
     }
 #endif
--
2.41.0
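
For readers unfamiliar with the operand modifier used above, a minimal,
self-contained sketch of the same idea follows. It assumes a 32-bit
big-endian parisc target built with gcc, where a 64-bit "r" operand
occupies a register pair; the helper name store64_example and the memory
layout are purely illustrative and not part of the patch.

/*
 * Sketch (assumes 32-bit big-endian parisc, gcc): a 64-bit "r" operand
 * lives in a register pair, so %0 names the register holding the upper
 * 32 bits and %R0 the register holding the lower 32 bits.  No manual
 * shifting/masking in C is needed to get at the two halves.
 */
static inline void store64_example(unsigned long long val, unsigned int *dst)
{
	__asm__ __volatile__(
	"	stw	%0, 0(%1)\n"	/* upper 32 bits of val */
	"	stw	%R0, 4(%1)\n"	/* lower 32 bits of val */
	: /* no outputs */
	: "r" (val), "r" (dst)
	: "memory");
}

This is the same trick the patch applies to emulate_std(): the 64-bit
val is passed as a single operand and the asm references its two halves
with %1 and %R1, so the separate valh/vall temporaries can be dropped.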