diff --git a/target/arm/helper.h b/target/arm/helper.h
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -525,14 +525,17 @@ DEF_HELPER_FLAGS_3(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
DEF_HELPER_FLAGS_2(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr)
DEF_HELPER_FLAGS_3(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_2(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sha512su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(crypto_sm3tt, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
-DEF_HELPER_FLAGS_3(crypto_sm3partw1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sm3partw2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/crypto_helper.c b/target/arm/crypto_helper.c
--- a/target/arm/crypto_helper.c
+++ b/target/arm/crypto_helper.c
@@ -31,6 +31,19 @@ union CRYPTO_STATE {
#define CR_ST_WORD(state, i) (state.words[i])
#endif
+/*
+ * The caller has not been converted to full gvec, and so only
+ * modifies the low 16 bytes of the vector register.
+ */
+static void clear_tail_16(void *vd, uint32_t desc)
+{
+ int opr_sz = simd_oprsz(desc);
+ int max_sz = simd_maxsz(desc);
+
+ assert(opr_sz == 16);
+ clear_tail(vd, opr_sz, max_sz);
+}
+
static void do_crypto_aese(uint64_t *rd, uint64_t *rn,
uint64_t *rm, bool decrypt)
{
@@ -470,7 +483,7 @@ static uint64_t s1_512(uint64_t x)
return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
}
-void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -483,9 +496,11 @@ void HELPER(crypto_sha512h)(void *vd, void *vn, void *vm)
rd[0] = d0;
rd[1] = d1;
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -498,9 +513,11 @@ void HELPER(crypto_sha512h2)(void *vd, void *vn, void *vm)
rd[0] = d0;
rd[1] = d1;
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha512su0)(void *vd, void *vn)
+void HELPER(crypto_sha512su0)(void *vd, void *vn, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -512,9 +529,11 @@ void HELPER(crypto_sha512su0)(void *vd, void *vn)
rd[0] = d0;
rd[1] = d1;
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -522,9 +541,11 @@ void HELPER(crypto_sha512su1)(void *vd, void *vn, void *vm)
rd[0] += s1_512(rn[0]) + rm[0];
rd[1] += s1_512(rn[1]) + rm[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -548,9 +569,11 @@ void HELPER(crypto_sm3partw1)(void *vd, void *vn, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -568,6 +591,8 @@ void HELPER(crypto_sm3partw2)(void *vd, void *vn, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
void HELPER(crypto_sm3tt)(void *vd, void *vn, void *vm, uint32_t imm2,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14668,28 +14668,36 @@ const GVecGen3 rax1_op =
static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
{
static const GVecGen3 sm4ekey_op = { .fno = gen_helper_crypto_sm4ekey };
+ static const GVecGen3 sha512h_op = { .fno = gen_helper_crypto_sha512h };
+ static const GVecGen3 sha512h2_op
+ = { .fno = gen_helper_crypto_sha512h2 };
+ static const GVecGen3 sha512su1_op
+ = { .fno = gen_helper_crypto_sha512su1 };
+ static const GVecGen3 sm3partw1_op
+ = { .fno = gen_helper_crypto_sm3partw1 };
+ static const GVecGen3 sm3partw2_op
+ = { .fno = gen_helper_crypto_sm3partw2 };
int opcode = extract32(insn, 10, 2);
int o = extract32(insn, 14, 1);
int rm = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
bool feature;
- CryptoThreeOpFn *genfn = NULL;
const GVecGen3 *gvecop = NULL;
if (o == 0) {
switch (opcode) {
case 0: /* SHA512H */
feature = dc_isar_feature(aa64_sha512, s);
- genfn = gen_helper_crypto_sha512h;
+ gvecop = &sha512h_op;
break;
case 1: /* SHA512H2 */
feature = dc_isar_feature(aa64_sha512, s);
- genfn = gen_helper_crypto_sha512h2;
+ gvecop = &sha512h2_op;
break;
case 2: /* SHA512SU1 */
feature = dc_isar_feature(aa64_sha512, s);
- genfn = gen_helper_crypto_sha512su1;
+ gvecop = &sha512su1_op;
break;
case 3: /* RAX1 */
feature = dc_isar_feature(aa64_sha3, s);
@@ -14702,11 +14710,11 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
switch (opcode) {
case 0: /* SM3PARTW1 */
feature = dc_isar_feature(aa64_sm3, s);
- genfn = gen_helper_crypto_sm3partw1;
+ gvecop = &sm3partw1_op;
break;
case 1: /* SM3PARTW2 */
feature = dc_isar_feature(aa64_sm3, s);
- genfn = gen_helper_crypto_sm3partw2;
+ gvecop = &sm3partw2_op;
break;
case 2: /* SM4EKEY */
feature = dc_isar_feature(aa64_sm4, s);
@@ -14726,22 +14734,7 @@ static void disas_crypto_three_reg_sha512(DisasContext *s, uint32_t insn)
if (!fp_access_check(s)) {
return;
}
-
- if (gvecop) {
- gen_gvec_op3(s, true, rd, rn, rm, gvecop);
- } else {
- TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
-
- tcg_rd_ptr = vec_full_reg_ptr(s, rd);
- tcg_rn_ptr = vec_full_reg_ptr(s, rn);
- tcg_rm_ptr = vec_full_reg_ptr(s, rm);
-
- genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
-
- tcg_temp_free_ptr(tcg_rd_ptr);
- tcg_temp_free_ptr(tcg_rn_ptr);
- tcg_temp_free_ptr(tcg_rm_ptr);
- }
+ gen_gvec_op3(s, true, rd, rn, rm, gvecop);
}
/* Crypto two-reg SHA512
@@ -14755,19 +14748,14 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
int opcode = extract32(insn, 10, 2);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
bool feature;
- CryptoTwoOpFn *genfn;
- gen_helper_gvec_3 *gvecfn = NULL;
switch (opcode) {
case 0: /* SHA512SU0 */
feature = dc_isar_feature(aa64_sha512, s);
- genfn = gen_helper_crypto_sha512su0;
break;
case 1: /* SM4E */
feature = dc_isar_feature(aa64_sm4, s);
- gvecfn = gen_helper_crypto_sm4e;
break;
default:
unallocated_encoding(s);
@@ -14783,18 +14771,16 @@ static void disas_crypto_two_reg_sha512(DisasContext *s, uint32_t insn)
return;
}
- if (gvecfn) {
- gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gvecfn);
- return;
+ switch (opcode) {
+ case 0: /* SHA512SU0 */
+ gen_gvec_op2_ool(s, true, rd, rn, 0, gen_helper_crypto_sha512su0);
+ break;
+ case 1: /* SM4E */
+ gen_gvec_op3_ool(s, true, rd, rd, rn, 0, gen_helper_crypto_sm4e);
+ break;
+ default:
+ g_assert_not_reached();
}
-
- tcg_rd_ptr = vec_full_reg_ptr(s, rd);
- tcg_rn_ptr = vec_full_reg_ptr(s, rn);
-
- genfn(tcg_rd_ptr, tcg_rn_ptr);
-
- tcg_temp_free_ptr(tcg_rd_ptr);
- tcg_temp_free_ptr(tcg_rn_ptr);
}
/* Crypto four-register

Do not yet convert the helpers to loop over opr_sz, but the descriptor
allows the vector tail to be cleared, which fixes an existing bug.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h        | 15 +++++----
 target/arm/crypto_helper.c | 37 +++++++++++++++++++----
 target/arm/translate-a64.c | 62 +++++++++++++++-----------------------
 3 files changed, 64 insertions(+), 50 deletions(-)

-- 
2.20.1
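For readers unfamiliar with the gvec descriptor plumbing, here is a minimal,
self-contained sketch of what the new desc argument and clear_tail_16() buy
us. It is illustration only, not QEMU code: the mk_desc()/desc_oprsz()/
desc_maxsz() packing below is a made-up stand-in for QEMU's simd_desc()/
simd_oprsz()/simd_maxsz(), and sha_like_op() is a hypothetical helper standing
in for the real SHA-512/SM3 ops. The point is that a helper which only
computes the low 16 bytes must still zero the destination out to the
register's maximum size, otherwise stale high bits (e.g. from a wider SVE
register) survive -- the pre-existing bug this patch fixes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Toy descriptor: low byte = operation size in bytes, next byte = the
 * register's maximum size in bytes.  This packing is purely illustrative;
 * QEMU builds the descriptor with simd_desc() and reads it back with
 * simd_oprsz()/simd_maxsz().
 */
static uint32_t mk_desc(uint32_t oprsz, uint32_t maxsz)
{
    return oprsz | (maxsz << 8);
}

static uint32_t desc_oprsz(uint32_t desc) { return desc & 0xff; }
static uint32_t desc_maxsz(uint32_t desc) { return (desc >> 8) & 0xff; }

/*
 * Same idea as the patch's clear_tail_16(): the operation only wrote the
 * low 16 bytes, so zero everything from there up to the register's
 * maximum size.
 */
static void clear_tail_16(void *vd, uint32_t desc)
{
    uint32_t opr_sz = desc_oprsz(desc);
    uint32_t max_sz = desc_maxsz(desc);

    assert(opr_sz == 16);
    memset((char *)vd + opr_sz, 0, max_sz - opr_sz);
}

/* Hypothetical fixed-width op: combine the low 128 bits, then clear the tail. */
static void sha_like_op(void *vd, void *vn, uint32_t desc)
{
    uint64_t *d = vd, *n = vn;

    d[0] ^= n[0];
    d[1] ^= n[1];
    clear_tail_16(vd, desc);
}

int main(void)
{
    /* A 32-byte "register", as if SVE gave us a 256-bit vector. */
    uint64_t d[4], n[4];

    memset(d, 0xaa, sizeof(d));   /* stale data above bit 127 */
    memset(n, 0x55, sizeof(n));

    sha_like_op(d, n, mk_desc(16, sizeof(d)));

    /* Low half is 0xaa ^ 0x55 = 0xff in every byte; the tail is now zero. */
    printf("d[0] = %#llx, d[2] = %#llx\n",
           (unsigned long long)d[0], (unsigned long long)d[2]);
    return 0;
}

In the patch itself the descriptor is supplied by the gvec expanders
(gen_gvec_op2_ool()/gen_gvec_op3_ool() and gen_gvec_op3()), so the helpers
see the full register width without the translator having to emit an
explicit tail clear.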
Do not yet convert the helpers to loop over opr_sz, but the descriptor allows the vector tail to be cleared. Which fixes an existing bug. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- target/arm/helper.h | 15 +++++---- target/arm/crypto_helper.c | 37 +++++++++++++++++++---- target/arm/translate-a64.c | 62 +++++++++++++++----------------------- 3 files changed, 64 insertions(+), 50 deletions(-) -- 2.20.1