Each of the operations in ccp_run_cmd() needs several hundred bytes of
kernel stack. Depending on the inlining, these may need separate stack
slots that add up to more than the warning limit, as shown in this
clang-based build:

drivers/crypto/ccp/ccp-ops.c:871:12: error: stack frame size of 1164 bytes in function 'ccp_run_aes_cmd' [-Werror,-Wframe-larger-than=]
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)

The problem may also happen when there is no warning, e.g. in the
ccp_run_cmd()->ccp_run_aes_cmd()->ccp_run_aes_gcm_cmd() call chain
with over 2000 bytes.

Mark each individual function as 'noinline_for_stack' to prevent this
from happening, and move the calls to the two special cases for aes
into the top-level function. This will keep the actual combined stack
usage to a minimum: 828 bytes for ccp_run_aes_gcm_cmd() and at most
524 bytes for each of the other cases.

Fixes: 63b945091a07 ("crypto: ccp - CCP device driver and interface support")
Signed-off-by: Arnd Bergmann <arnd@xxxxxxxx>
---
 drivers/crypto/ccp/ccp-ops.c | 52 +++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 21 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 866b2e05ca77..97293d05a759 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -455,8 +455,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
 	return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
 }
 
-static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
-				struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx;
@@ -611,8 +611,8 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
-			       struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx, final_wa, tag;
@@ -868,7 +868,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
 	struct ccp_dm_workarea key, ctx;
@@ -878,12 +879,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	bool in_place = false;
 	int ret;
 
-	if (aes->mode == CCP_AES_MODE_CMAC)
-		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
-
-	if (aes->mode == CCP_AES_MODE_GCM)
-		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
-
 	if (!((aes->key_len == AES_KEYSIZE_128) ||
 	      (aes->key_len == AES_KEYSIZE_192) ||
 	      (aes->key_len == AES_KEYSIZE_256)))
@@ -1050,8 +1045,8 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
-			       struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
 	struct ccp_dm_workarea key, ctx;
@@ -1250,7 +1245,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_des3_engine *des3 = &cmd->u.des3;
 
@@ -1446,7 +1442,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_sha_engine *sha = &cmd->u.sha;
 	struct ccp_dm_workarea ctx;
@@ -1790,7 +1787,8 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
 	struct ccp_dm_workarea exp, src, dst;
@@ -1921,8 +1919,8 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
-				struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_passthru_engine *pt = &cmd->u.passthru;
 	struct ccp_dm_workarea mask;
@@ -2053,7 +2051,8 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
 	return ret;
 }
 
-static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+static noinline_for_stack int
+ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
 				      struct ccp_cmd *cmd)
 {
 	struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
@@ -2394,7 +2393,8 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	return ret;
 }
 
-static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+static noinline_for_stack int
+ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
 
@@ -2431,7 +2431,17 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	switch (cmd->engine) {
 	case CCP_ENGINE_AES:
-		ret = ccp_run_aes_cmd(cmd_q, cmd);
+		switch (cmd->u.aes.mode) {
+		case CCP_AES_MODE_CMAC:
+			ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
+			break;
+		case CCP_AES_MODE_GCM:
+			ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
+			break;
+		default:
+			ret = ccp_run_aes_cmd(cmd_q, cmd);
+			break;
+		}
 		break;
 	case CCP_ENGINE_XTS_AES_128:
 		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
-- 
2.20.0
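
For reference, below is a minimal, self-contained sketch (hypothetical names,
not the driver code) of the pattern the patch relies on: when a dispatcher's
helpers are inlined, each helper's large locals can end up as separate slots
in the dispatcher's frame, so their sizes add up; forcing the helpers out of
line keeps each workarea in its own frame, so only one is live at a time. In
the kernel, noinline_for_stack is an alias for the compiler's noinline
attribute, which the stand-in macro below approximates.

/* Stand-in for the kernel macro; in-tree it expands to noinline. */
#define noinline_for_stack __attribute__((noinline))

#include <string.h>

struct fake_cmd { int mode; };	/* hypothetical, not struct ccp_cmd */

static noinline_for_stack int run_mode_a(struct fake_cmd *cmd)
{
	unsigned char workarea[512];	/* lives only in this frame */

	memset(workarea, 0, sizeof(workarea));
	return workarea[cmd->mode & 511];
}

static noinline_for_stack int run_mode_b(struct fake_cmd *cmd)
{
	unsigned char workarea[512];	/* lives only in this frame */

	memset(workarea, 1, sizeof(workarea));
	return workarea[cmd->mode & 511];
}

/*
 * The dispatcher's own frame stays small: with the helpers out of line,
 * at most one 512-byte workarea is on the stack at any time, instead of
 * both being folded into this function's frame by inlining.
 */
int run_cmd(struct fake_cmd *cmd)
{
	return cmd->mode ? run_mode_a(cmd) : run_mode_b(cmd);
}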