diff --git a/target/arm/helper.h b/target/arm/helper.h
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -517,13 +517,13 @@ DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(crypto_sha1_3reg, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_2(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
-DEF_HELPER_FLAGS_2(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_3(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
diff --git a/target/arm/crypto_helper.c b/target/arm/crypto_helper.c
--- a/target/arm/crypto_helper.c
+++ b/target/arm/crypto_helper.c
@@ -303,7 +303,7 @@ void HELPER(crypto_sha1_3reg)(void *vd, void *vn, void *vm, uint32_t op)
rd[1] = d.l[1];
}
-void HELPER(crypto_sha1h)(void *vd, void *vm)
+void HELPER(crypto_sha1h)(void *vd, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rm = vm;
@@ -314,9 +314,11 @@ void HELPER(crypto_sha1h)(void *vd, void *vm)
rd[0] = m.l[0];
rd[1] = m.l[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha1su1)(void *vd, void *vm)
+void HELPER(crypto_sha1su1)(void *vd, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rm = vm;
@@ -330,6 +332,8 @@ void HELPER(crypto_sha1su1)(void *vd, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
/*
@@ -357,7 +361,7 @@ static uint32_t s1(uint32_t x)
return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
}
-void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -388,9 +392,11 @@ void HELPER(crypto_sha256h)(void *vd, void *vn, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -413,9 +419,11 @@ void HELPER(crypto_sha256h2)(void *vd, void *vn, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha256su0)(void *vd, void *vm)
+void HELPER(crypto_sha256su0)(void *vd, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rm = vm;
@@ -429,9 +437,11 @@ void HELPER(crypto_sha256su0)(void *vd, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
-void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm)
+void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm, uint32_t desc)
{
uint64_t *rd = vd;
uint64_t *rn = vn;
@@ -447,6 +457,8 @@ void HELPER(crypto_sha256su1)(void *vd, void *vn, void *vm)
rd[0] = d.l[0];
rd[1] = d.l[1];
+
+ clear_tail_16(vd, desc);
}
/*
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14517,8 +14517,7 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
int rm = extract32(insn, 16, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- CryptoThreeOpFn *genfn;
- TCGv_ptr tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr;
+ gen_helper_gvec_3 *genfn;
bool feature;
if (size != 0) {
@@ -14560,23 +14559,22 @@ static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
return;
}
- tcg_rd_ptr = vec_full_reg_ptr(s, rd);
- tcg_rn_ptr = vec_full_reg_ptr(s, rn);
- tcg_rm_ptr = vec_full_reg_ptr(s, rm);
-
if (genfn) {
- genfn(tcg_rd_ptr, tcg_rn_ptr, tcg_rm_ptr);
+ gen_gvec_op3_ool(s, true, rd, rn, rm, 0, genfn);
} else {
TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
+ TCGv_ptr tcg_rd_ptr = vec_full_reg_ptr(s, rd);
+ TCGv_ptr tcg_rn_ptr = vec_full_reg_ptr(s, rn);
+ TCGv_ptr tcg_rm_ptr = vec_full_reg_ptr(s, rm);
gen_helper_crypto_sha1_3reg(tcg_rd_ptr, tcg_rn_ptr,
tcg_rm_ptr, tcg_opcode);
- tcg_temp_free_i32(tcg_opcode);
- }
- tcg_temp_free_ptr(tcg_rd_ptr);
- tcg_temp_free_ptr(tcg_rn_ptr);
- tcg_temp_free_ptr(tcg_rm_ptr);
+ tcg_temp_free_i32(tcg_opcode);
+ tcg_temp_free_ptr(tcg_rd_ptr);
+ tcg_temp_free_ptr(tcg_rn_ptr);
+ tcg_temp_free_ptr(tcg_rm_ptr);
+ }
}
/* Crypto two-reg SHA
@@ -14591,9 +14589,8 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
int opcode = extract32(insn, 12, 5);
int rn = extract32(insn, 5, 5);
int rd = extract32(insn, 0, 5);
- CryptoTwoOpFn *genfn;
+ gen_helper_gvec_2 *genfn;
bool feature;
- TCGv_ptr tcg_rd_ptr, tcg_rn_ptr;
if (size != 0) {
unallocated_encoding(s);
@@ -14626,14 +14623,7 @@ static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
if (!fp_access_check(s)) {
return;
}
-
- tcg_rd_ptr = vec_full_reg_ptr(s, rd);
- tcg_rn_ptr = vec_full_reg_ptr(s, rn);
-
- genfn(tcg_rd_ptr, tcg_rn_ptr);
-
- tcg_temp_free_ptr(tcg_rd_ptr);
- tcg_temp_free_ptr(tcg_rn_ptr);
+ gen_gvec_op2_ool(s, true, rd, rn, 0, genfn);
}
static void gen_rax1_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)
diff --git a/target/arm/translate.c b/target/arm/translate.c
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5364,28 +5364,30 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
tmp4 = tcg_const_i32(size);
gen_helper_crypto_sha1_3reg(ptr1, ptr2, ptr3, tmp4);
tcg_temp_free_i32(tmp4);
+ tcg_temp_free_ptr(ptr1);
+ tcg_temp_free_ptr(ptr2);
+ tcg_temp_free_ptr(ptr3);
} else { /* SHA-256 */
- if (!dc_isar_feature(aa32_sha2, s) || size == 3) {
+ gen_helper_gvec_3 *fn;
+
+ if (!dc_isar_feature(aa32_sha2, s)) {
return 1;
}
- ptr1 = vfp_reg_ptr(true, rd);
- ptr2 = vfp_reg_ptr(true, rn);
- ptr3 = vfp_reg_ptr(true, rm);
switch (size) {
case 0:
- gen_helper_crypto_sha256h(ptr1, ptr2, ptr3);
+ fn = gen_helper_crypto_sha256h;
break;
case 1:
- gen_helper_crypto_sha256h2(ptr1, ptr2, ptr3);
+ fn = gen_helper_crypto_sha256h2;
break;
case 2:
- gen_helper_crypto_sha256su1(ptr1, ptr2, ptr3);
+ fn = gen_helper_crypto_sha256su1;
break;
+ default:
+ return 1;
}
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16, 0, fn);
}
- tcg_temp_free_ptr(ptr1);
- tcg_temp_free_ptr(ptr2);
- tcg_temp_free_ptr(ptr3);
return 0;
case NEON_3R_VPADD_VQRDMLAH:
@@ -6947,13 +6949,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
if (!dc_isar_feature(aa32_sha1, s) || ((rm | rd) & 1)) {
return 1;
}
- ptr1 = vfp_reg_ptr(true, rd);
- ptr2 = vfp_reg_ptr(true, rm);
-
- gen_helper_crypto_sha1h(ptr1, ptr2);
-
- tcg_temp_free_ptr(ptr1);
- tcg_temp_free_ptr(ptr2);
+ tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, 16, 16, 0,
+ gen_helper_crypto_sha1h);
break;
case NEON_2RM_SHA1SU1:
if ((rm | rd) & 1) {
@@ -6967,17 +6964,10 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
} else if (!dc_isar_feature(aa32_sha1, s)) {
return 1;
}
- ptr1 = vfp_reg_ptr(true, rd);
- ptr2 = vfp_reg_ptr(true, rm);
- if (q) {
- gen_helper_crypto_sha256su0(ptr1, ptr2);
- } else {
- gen_helper_crypto_sha1su1(ptr1, ptr2);
- }
- tcg_temp_free_ptr(ptr1);
- tcg_temp_free_ptr(ptr2);
+ tcg_gen_gvec_2_ool(rd_ofs, rm_ofs, 16, 16, 0,
+ q ? gen_helper_crypto_sha256su0
+ : gen_helper_crypto_sha1su1);
break;
-
case NEON_2RM_VMVN:
tcg_gen_gvec_not(0, rd_ofs, rm_ofs, vec_size, vec_size);
break;
Do not yet convert the helpers to loop over opr_sz, but the descriptor
allows the vector tail to be cleared, which fixes an existing bug.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h        | 12 +++++------
 target/arm/crypto_helper.c | 24 +++++++++++++++------
 target/arm/translate-a64.c | 34 +++++++++++------------------
 target/arm/translate.c     | 44 +++++++++++++++----------------------
 4 files changed, 53 insertions(+), 61 deletions(-)

-- 
2.20.1
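For readers following along outside the tree: the new i32 `desc` argument on
each helper is the standard gvec descriptor that the out-of-line expanders
(`tcg_gen_gvec_2_ool`/`tcg_gen_gvec_3_ool`) pack for every call, and
`clear_tail_16` — introduced with the earlier crypto-helper conversions in
this series — reads it to zero the unwritten tail of the destination. Below
is a minimal sketch of the mechanism, assuming the 5-bit size encodings from
tcg-gvec-desc.h and the clear_tail loop from vec_internal.h; the two
stand-in accessors are inlined here only to keep the sketch self-contained,
so treat the exact field layout as an assumption rather than a spec:

```c
#include <assert.h>
#include <stdint.h>

/* Stand-ins mirroring tcg-gvec-desc.h: sizes are stored as (bytes/8)-1. */
static inline uint32_t simd_oprsz(uint32_t desc)
{
    return ((desc & 0x1f) + 1) * 8;        /* bits [4:0]: operation size */
}

static inline uint32_t simd_maxsz(uint32_t desc)
{
    return (((desc >> 5) & 0x1f) + 1) * 8; /* bits [9:5]: full register size */
}

/* Zero the destination from opr_sz up to the full register size. */
static inline void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
    uintptr_t i;

    for (i = opr_sz; i < max_sz; i += 8) {
        *(uint64_t *)((char *)vd + i) = 0;
    }
}

/* The SHA helpers always operate on exactly one 16-byte vector. */
static void clear_tail_16(void *vd, uint32_t desc)
{
    assert(simd_oprsz(desc) == 16);
    clear_tail(vd, 16, simd_maxsz(desc));
}
```

This is presumably the existing bug being fixed: the old two- and
three-operand helpers wrote only the low 16 bytes of Vd, leaving the high
bits of the backing Z register stale when the guest has SVE, even though an
AdvSIMD write is required to zero them. The call sites differ accordingly:
the AArch32 paths pass oprsz = maxsz = 16, making the epilogue a no-op,
while the A64 gen_gvec_op{2,3}_ool wrappers pass the full register size as
maxsz, so the tail beyond 16 bytes is actually cleared.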