From: James Cowgill <James.Cowgill@xxxxxxxxxx>

This patch has been added to the 3.12 stable tree. If you have any
objections, please let us know.

===============

commit 884b426917e4b3c85f33b382c792a94305dfdd62 upstream.

If copy_from_user is called with a large buffer (>= 128 bytes) and the
userspace buffer refers partially to unreadable memory, then it is
possible for Octeon's copy_from_user to report the wrong number of bytes
as having been copied. In the case where the buffer size is an exact
multiple of 128 and the fault occurs in the last 64 bytes,
copy_from_user will report that all the bytes were copied successfully
but leave some garbage in the destination buffer.

The bug is in the main __copy_user_common loop in octeon-memcpy.S where,
in the middle of the loop, src and dst are incremented by 128 bytes. The
l_exc_copy fault handler is used after this, but it assumes that
"src < THREAD_BUADDR($28)". This is not the case if src has already
been incremented.

Fix by adding an extra fault handler which rewinds the src and dst
pointers 128 bytes before falling through to l_exc_copy.

Thanks to the pwritev test from the strace test suite for originally
highlighting this bug!

Fixes: 5b3b16880f40 ("MIPS: Add Cavium OCTEON processor support ...")
Signed-off-by: James Cowgill <James.Cowgill@xxxxxxxxxx>
Acked-by: David Daney <david.daney@xxxxxxxxxx>
Reviewed-by: James Hogan <james.hogan@xxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: linux-mips@xxxxxxxxxxxxxx
Patchwork: https://patchwork.linux-mips.org/patch/14978/
Signed-off-by: James Hogan <james.hogan@xxxxxxxxxx>
Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
---
 arch/mips/cavium-octeon/octeon-memcpy.S | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 64e08df51d65..8b7004132491 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -208,18 +208,18 @@ EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
 	ADD	src, src, 16*NBYTES
 EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
 	ADD	dst, dst, 16*NBYTES
-EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy)
-EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy)
-EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy)
-EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy)
+EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy_rewind16)
 EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
 EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
 EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
 EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
-EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy)
-EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy)
-EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy)
-EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy)
+EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy_rewind16)
+EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy_rewind16)
 EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
 EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
 EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
@@ -383,6 +383,10 @@ done:
 	 nop
 	END(memcpy)
 
+l_exc_copy_rewind16:
+	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
+	SUB	src, src, 16*NBYTES
+	SUB	dst, dst, 16*NBYTES
 l_exc_copy:
 	/*
 	 * Copy bytes from src until faulting load address (or until a
-- 
2.12.0
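
[Editor's note] For reference, the failure mode described in the commit message can be exercised from userspace. The sketch below is illustrative only and is not taken from the patch or from the strace test suite that originally caught the bug: it uses plain write() rather than pwritev(), and the /tmp path, buffer layout, and return-value expectations are assumptions based on the commit message (a 128-byte buffer whose last 64 bytes fall in an unreadable page). On an affected Octeon kernel the syscall may claim the full 128 bytes were copied; on other systems it simply demonstrates the expected correct behavior (a short write or EFAULT).

/*
 * Hypothetical reproducer sketch (not part of the patch): build a
 * 128-byte user buffer whose last 64 bytes live in an unreadable page,
 * then pass it to write(), which pulls it in with copy_from_user().
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Two adjacent anonymous pages; the second is made unreadable. */
	char *map = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED)
		return 1;
	memset(map, 'A', 2 * page);
	if (mprotect(map + page, page, PROT_NONE))
		return 1;

	/* Buffer: 64 readable bytes followed by 64 unreadable bytes,
	 * so the fault lands in the last 64 bytes of a 128-byte copy. */
	char *buf = map + page - 64;

	int fd = open("/tmp/octeon-copy-test", O_CREAT | O_TRUNC | O_WRONLY, 0600);
	if (fd < 0)
		return 1;

	ssize_t n = write(fd, buf, 128);
	printf("write() returned %zd (expected 64 or -1/EFAULT, not 128)\n", n);

	close(fd);
	return 0;
}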