The following commit has been merged into the objtool/core branch of tip:

Commit-ID:     ce5846668076aa76a17ab559f0296374e3611fec
Gitweb:        https://git.kernel.org/tip/ce5846668076aa76a17ab559f0296374e3611fec
Author:        Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
AuthorDate:    Wed, 24 Feb 2021 10:29:22 -06:00
Committer:     Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
CommitterDate: Mon, 19 Apr 2021 12:36:36 -05:00

x86/crypto/sha256-avx2: Standardize stack alignment prologue

Use a more standard prologue for saving the stack pointer before
realigning the stack.  This enables ORC unwinding by allowing objtool
to understand the stack realignment.

Signed-off-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Tested-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Acked-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Tested-by: Sami Tolvanen <samitolvanen@xxxxxxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/8048e7444c49a8137f05265262b83dc50f8fb7f3.1614182415.git.jpoimboe@xxxxxxxxxx
---
 arch/x86/crypto/sha256-avx2-asm.S | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 11ff60c..4087f74 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -117,15 +117,13 @@ _XMM_SAVE_SIZE	= 0
 _INP_END_SIZE	= 8
 _INP_SIZE	= 8
 _CTX_SIZE	= 8
-_RSP_SIZE	= 8
 
 _XFER		= 0
 _XMM_SAVE	= _XFER     + _XFER_SIZE
 _INP_END	= _XMM_SAVE + _XMM_SAVE_SIZE
 _INP		= _INP_END  + _INP_END_SIZE
 _CTX		= _INP      + _INP_SIZE
-_RSP		= _CTX      + _CTX_SIZE
-STACK_SIZE	= _RSP      + _RSP_SIZE
+STACK_SIZE	= _CTX      + _CTX_SIZE
 
 # rotate_Xs
 # Rotate values of symbols X0...X3
@@ -533,11 +531,11 @@ SYM_FUNC_START(sha256_transform_rorx)
 	pushq	%r14
 	pushq	%r15
 
-	mov	%rsp, %rax
+	push	%rbp
+	mov	%rsp, %rbp
+
 	subq	$STACK_SIZE, %rsp
 	and	$-32, %rsp	# align rsp to 32 byte boundary
 
-	mov	%rax, _RSP(%rsp)
-
 	shl	$6, NUM_BLKS	# convert to bytes
 	jz	done_hash
 
@@ -704,7 +702,8 @@ only_one_block:
 
 done_hash:
 
-	mov	_RSP(%rsp), %rsp
+	mov	%rbp, %rsp
+	pop	%rbp
 
 	popq	%r15
 	popq	%r14
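
For reference, the pattern the patch adopts looks roughly like the minimal
standalone sketch below.  The old code stashed the pre-alignment %rsp in a
slot on the already-realigned stack (_RSP), which objtool cannot track; the
new code keeps it in the %rbp frame pointer, a register the ORC unwinder
understands.  This sketch is illustrative only: my_func and FRAME_SIZE are
hypothetical placeholders, not symbols from the kernel source.

	# Hypothetical example of a frame-pointer-based stack
	# realignment prologue/epilogue (GAS, x86-64).
	.set	FRAME_SIZE, 64		# placeholder local frame size

	.text
	.globl	my_func
my_func:
	push	%rbp			# save caller's %rbp
	mov	%rsp, %rbp		# record pre-alignment %rsp in %rbp
					# so the unwinder can recover it

	sub	$FRAME_SIZE, %rsp	# allocate the local frame
	and	$-32, %rsp		# realign %rsp to a 32-byte boundary
					# for aligned AVX2 loads/stores

	# ... body: (%rsp) now points at 32-byte-aligned scratch space ...

	mov	%rbp, %rsp		# undo allocation and realignment
	pop	%rbp			# restore caller's %rbp
	ret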