Co-authored-by: Nazar Kazakov <nazar.kazakov@xxxxxxxxxxxxxxx>
Signed-off-by: Nazar Kazakov <nazar.kazakov@xxxxxxxxxxxxxxx>
Signed-off-by: Lawrence Hunter <lawrence.hunter@xxxxxxxxxxxxxxx>
---
 target/riscv/helper.h                       |   2 +
 target/riscv/insn32.decode                  |   2 +
 target/riscv/insn_trans/trans_rvzvknh.c.inc |   2 +
 target/riscv/vcrypto_helper.c               | 144 ++++++++++++++++++++
 4 files changed, 150 insertions(+)

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 76ea2ff49b..77bbd9db56 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1201,3 +1201,5 @@ DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
 DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
 
 DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2ch_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index aef4d0b476..c95886040b 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -942,3 +942,5 @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
 
 # *** RV64 Zvknh vector crypto extension ***
 vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
diff --git a/target/riscv/insn_trans/trans_rvzvknh.c.inc b/target/riscv/insn_trans/trans_rvzvknh.c.inc
index 97bdf4d72f..3cf3ceaf3a 100644
--- a/target/riscv/insn_trans/trans_rvzvknh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzvknh.c.inc
@@ -80,3 +80,5 @@ static bool vsha_check(DisasContext *s, arg_rmrr *a)
 }
 
 GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, 4)
+GEN_VV_UNMASKED_TRANS(vsha2cl_vv, vsha_check, 4)
+GEN_VV_UNMASKED_TRANS(vsha2ch_vv, vsha_check, 4)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index ae253b3357..bf0455f8e0 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -526,3 +526,147 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
     vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
     env->vstart = 0;
 }
+
+static inline uint64_t sum0_64(uint64_t x)
+{
+    return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
+}
+
+static inline uint32_t sum0_32(uint32_t x)
+{
+    return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
+}
+
+static inline uint64_t sum1_64(uint64_t x)
+{
+    return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
+}
+
+static inline uint32_t sum1_32(uint32_t x)
+{
+    return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
+}
+
+#define ch(x, y, z) ((x & y) ^ ((~x) & z))
+
+#define maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
+
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
+{
+    uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
+    uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
+    uint64_t W0 = vs1[0], W1 = vs1[1];
+    uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
+    uint64_t T2 = sum0_64(a) + maj(a, b, c);
+
+    h = g;
+    g = f;
+    f = e;
+    e = d + T1;
+    d = c;
+    c = b;
+    b = a;
+    a = T1 + T2;
+
+    T1 = h + sum1_64(e) + ch(e, f, g) + W1;
+    T2 = sum0_64(a) + maj(a, b, c);
+    h = g;
+    g = f;
+    f = e;
+    e = d + T1;
+    d = c;
+    c = b;
+    b = a;
+    a = T1 + T2;
+
+    vd[0] = f;
+    vd[1] = e;
+    vd[2] = b;
+    vd[3] = a;
+}
+
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
+{
+    uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
+    uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
+    uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
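+    /*
+     * Two rounds of the SHA-256 compression function (FIPS 180-4):
+     * the first round consumes message word W0, the second W1.
+     */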
+    uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
+    uint32_t T2 = sum0_32(a) + maj(a, b, c);
+
+    h = g;
+    g = f;
+    f = e;
+    e = d + T1;
+    d = c;
+    c = b;
+    b = a;
+    a = T1 + T2;
+
+    T1 = h + sum1_32(e) + ch(e, f, g) + W1;
+    T2 = sum0_32(a) + maj(a, b, c);
+    h = g;
+    g = f;
+    f = e;
+    e = d + T1;
+    d = c;
+    c = b;
+    b = a;
+    a = T1 + T2;
+
+    vd[H4(0)] = f;
+    vd[H4(1)] = e;
+    vd[H4(2)] = b;
+    vd[H4(3)] = a;
+}
+
+void HELPER(vsha2ch_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+                        uint32_t desc)
+{
+    uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+    uint32_t esz = sew == MO_64 ? 8 : 4;
+    uint32_t total_elems;
+    uint32_t vta = vext_vta(desc);
+
+    for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+        if (sew == MO_64) {
+            vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+                      ((uint64_t *)vs1) + 4 * i + 2);
+        } else {
+            vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+                      ((uint32_t *)vs1) + 4 * i + 2);
+        }
+    }
+
+    /* set tail elements to 1s */
+    total_elems = vext_get_total_elems(env, desc, esz);
+    vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+    env->vstart = 0;
+}
+
+void HELPER(vsha2cl_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+                        uint32_t desc)
+{
+    uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+    uint32_t esz = sew == MO_64 ? 8 : 4;
+    uint32_t total_elems;
+    uint32_t vta = vext_vta(desc);
+
+    for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+        if (sew == MO_64) {
+            vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+                      (((uint64_t *)vs1) + 4 * i));
+        } else {
+            vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+                      (((uint32_t *)vs1) + 4 * i));
+        }
+    }
+
+    /* set tail elements to 1s */
+    total_elems = vext_get_total_elems(env, desc, esz);
+    vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+    env->vstart = 0;
+}
-- 
2.39.2