Move all padding work to a dedicated function.

Signed-off-by: Corentin Labbe <clabbe@xxxxxxxxxxxx>
---
 .../crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 87 ++++++++++++++-----
 1 file changed, 65 insertions(+), 22 deletions(-)

diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index cb510ec21ec4..6adb6a0428b7 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -328,6 +328,64 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
 	return crypto_transfer_hash_request_to_engine(engine, areq);
 }
 
+static u64 hash_pad(u32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
+{
+	u64 fill, min_fill, j, k;
+	__be64 *bebits;
+	__le64 *lebits;
+
+	j = padi;
+	buf[j++] = cpu_to_le32(0x80);
+
+	if (bs == 64) {
+		fill = 64 - (byte_count % 64);
+		min_fill = 2 * sizeof(u32) + sizeof(u32);
+	} else {
+		fill = 128 - (byte_count % 128);
+		min_fill = 4 * sizeof(u32) + sizeof(u32);
+	}
+
+	if (fill < min_fill)
+		fill += bs;
+
+	k = j;
+	j += (fill - min_fill) / sizeof(u32);
+	if (j * 4 > bufsize) {
+		pr_err("%s OVERFLOW %llu\n", __func__, j);
+		return 0;
+	}
+	for (; k < j; k++)
+		buf[k] = 0;
+
+	if (le) {
+		/* MD5 */
+		lebits = (__le64 *)&buf[j];
+		*lebits = cpu_to_le64(byte_count << 3);
+		j += 2;
+	} else {
+		if (bs == 64) {
+			/* sha1 sha224 sha256 */
+			bebits = (__be64 *)&buf[j];
+			*bebits = cpu_to_be64(byte_count << 3);
+			j += 2;
+		} else {
+			/* sha384 sha512*/
+			bebits = (__be64 *)&buf[j];
+			*bebits = cpu_to_be64(byte_count >> 61);
+			j += 2;
+			bebits = (__be64 *)&buf[j];
+			*bebits = cpu_to_be64(byte_count << 3);
+			j += 2;
+		}
+	}
+	if (j * 4 > bufsize) {
+		pr_err("%s OVERFLOW %llu\n", __func__, j);
+		return 0;
+	}
+
+	return j;
+}
+
 /* sun8i_ss_hash_run - run an ahash request
  * Send the data of the request to the SS along with an extra SG with padding
  */
@@ -342,11 +400,9 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
 	struct scatterlist *sg;
 	int nr_sgs, err, digestsize;
 	unsigned int len;
-	u64 fill, min_fill, byte_count;
+	u64 byte_count;
 	void *pad, *result;
 	int j, i, k, todo;
-	__be64 *bebits;
-	__le64 *lebits;
 	dma_addr_t addr_res, addr_pad;
 	__le32 *bf;
 
@@ -421,33 +477,20 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
 	i--;
 
 	byte_count = areq->nbytes;
-	bf[j++] = cpu_to_le32(0x80);
-
-	fill = 64 - (byte_count % 64);
-	min_fill = 3 * sizeof(u32);
-
-	if (fill < min_fill)
-		fill += 64;
-
-	k = j;
-	j += (fill - min_fill) / sizeof(u32);
-	for (; k < j; k++)
-		bf[k] = 0;
-
 	switch (algt->ss_algo_id) {
 	case SS_ID_HASH_MD5:
-		lebits = (__le64 *)&bf[j];
-		*lebits = cpu_to_le64(byte_count << 3);
-		j += 2;
+		j = hash_pad(bf, 4096, j, byte_count, true, bs);
 		break;
 	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
 	case SS_ID_HASH_SHA256:
-		bebits = (__be64 *)&bf[j];
-		*bebits = cpu_to_be64(byte_count << 3);
-		j += 2;
+		j = hash_pad(bf, 4096, j, byte_count, false, bs);
 		break;
 	}
+	if (!j) {
+		err = -EINVAL;
+		goto theend;
+	}
 
 	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
 	if (dma_mapping_error(ss->dev, addr_pad)) {
-- 
2.34.1
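
For reference only, not part of the patch: the small userspace sketch below mirrors the pad-size arithmetic that the new hash_pad() helper performs, so the fill/min_fill computation can be sanity-checked on the host. Only the fill/min_fill/length-word arithmetic is taken from the hunk above; the pad_words() helper, the sample lengths and the output format are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Return the number of 32-bit padding words hash_pad() would emit for a
 * message of byte_count bytes, with bs == 64 (MD5/SHA1/SHA224/SHA256)
 * or bs == 128 (SHA384/SHA512).
 */
static uint64_t pad_words(uint64_t byte_count, int bs)
{
	uint64_t fill, min_fill, j = 0;

	j++;						/* word holding the 0x80 marker byte */

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(uint32_t) + sizeof(uint32_t);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(uint32_t) + sizeof(uint32_t);
	}
	if (fill < min_fill)
		fill += bs;

	j += (fill - min_fill) / sizeof(uint32_t);	/* zero-fill words */
	j += (bs == 64) ? 2 : 4;			/* 64- or 128-bit bit-length field */
	return j;
}

int main(void)
{
	uint64_t lengths[] = { 0, 20, 56, 64, 116, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("len=%3llu: pad=%2llu words (bs=64), %2llu words (bs=128)\n",
		       (unsigned long long)lengths[i],
		       (unsigned long long)pad_words(lengths[i], 64),
		       (unsigned long long)pad_words(lengths[i], 128));
	return 0;
}

For the word-aligned lengths shown, message length plus 4 * pad_words() comes out as a multiple of the block size, which is the property the driver relies on when it DMAs the pad buffer after the data scatterlist.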