@@ -27,11 +27,13 @@
&i imm
&qrr_e q rd rn esz
&qrrr_e q rd rn rm esz
+&qrrrr_e q rd rn rm ra esz

@rr_q1e0 ........ ........ ...... rn:5 rd:5 &qrr_e q=1 esz=0
@r2r_q1e0 ........ ........ ...... rm:5 rd:5 &qrrr_e rn=%rd q=1 esz=0
@rrr_q1e0 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=0
@rrr_q1e3 ........ ... rm:5 ...... rn:5 rd:5 &qrrr_e q=1 esz=3
+@rrrr_q1e3 ........ ... rm:5 . ra:5 rn:5 rd:5 &qrrrr_e q=1 esz=3

### Data Processing - Immediate

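For orientation: decodetree turns the new argument set into a C struct and one trans_* hook per pattern. A sketch of the generated code, following decodetree's naming conventions rather than quoting the real generated decoder:

    /* Generated (approximately) from "&qrrrr_e q rd rn rm ra esz". */
    typedef struct {
        int q;
        int rd;
        int rn;
        int rm;
        int ra;
        int esz;
    } arg_qrrrr_e;

    /* Each pattern using @rrrr_q1e3 extracts rm/ra/rn/rd from the insn,
     * fixes q=1 and esz=3, and calls the matching hook, e.g.: */
    static bool trans_EOR3(DisasContext *s, arg_qrrrr_e *a);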
@@ -636,3 +638,9 @@ SM4EKEY 1100 1110 011 ..... 110010 ..... ..... @rrr_q1e0

SHA512SU0 1100 1110 110 00000 100000 ..... ..... @rr_q1e0
SM4E 1100 1110 110 00000 100001 ..... ..... @r2r_q1e0
+
+### Cryptographic four-register
+
+EOR3 1100 1110 000 ..... 0 ..... ..... ..... @rrrr_q1e3
+BCAX 1100 1110 001 ..... 0 ..... ..... ..... @rrrr_q1e3
+SM3SS1 1100 1110 010 ..... 0 ..... ..... ..... @rrrr_q1e3
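A hand-worked encoding check for the new patterns (mine, worth confirming against an assembler): EOR3 V0.16B, V1.16B, V2.16B, V3.16B, i.e. rd=0, rn=1, rm=2, ra=3, should assemble to

    1100 1110 | 000 | 00010 | 0 | 00011 | 00001 | 00000  =  0xce020c20
      31:24     op0   rm=2   15   ra=3    rn=1    rd=0

where bits [23:21] select the instruction: 000 EOR3, 001 BCAX, 010 SM3SS1, and 011 matches nothing.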
@@ -1352,6 +1352,17 @@ static bool do_gvec_fn3(DisasContext *s, arg_qrrr_e *a, GVecGen3Fn *fn)
return true;
}

+static bool do_gvec_fn4(DisasContext *s, arg_qrrrr_e *a, GVecGen4Fn *fn)
+{
+ if (!a->q && a->esz == MO_64) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ gen_gvec_fn4(s, a->q, a->rd, a->rn, a->rm, a->ra, fn, a->esz);
+ }
+ return true;
+}
+
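Context for review: GVecGen4Fn is the existing four-operand expander typedef from tcg/tcg-op-gvec.h, and gen_gvec_fn4() already lives in translate-a64.c alongside the fn2/fn3 wrappers. Inferred from the call above, its shape is roughly (a sketch, not quoted from the tree):

    static void gen_gvec_fn4(DisasContext *s, bool is_q, int rd, int rn,
                             int rm, int rx, GVecGen4Fn *gvec_fn, int vece)
    {
        gvec_fn(vece, vec_full_reg_offset(s, rd), vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vec_full_reg_offset(s, rx),
                is_q ? 16 : 8, vec_full_reg_size(s));
    }

The "!a->q && a->esz == MO_64" guard rejects 64-bit elements in a 64-bit vector; q is fixed to 1 for all three patterns here, so it never fires, but it mirrors do_gvec_fn3 above and keeps the helper reusable.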
/*
* This utility function is for doing register extension with an
* optional shift. You will likely want to pass a temporary for the
@@ -4632,6 +4643,38 @@ TRANS_FEAT(SM4EKEY, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4ekey)

TRANS_FEAT(SHA512SU0, aa64_sha512, do_gvec_op2_ool, a, 0, gen_helper_crypto_sha512su0)
TRANS_FEAT(SM4E, aa64_sm4, do_gvec_op3_ool, a, 0, gen_helper_crypto_sm4e)
+TRANS_FEAT(EOR3, aa64_sha3, do_gvec_fn4, a, gen_gvec_eor3)
+TRANS_FEAT(BCAX, aa64_sha3, do_gvec_fn4, a, gen_gvec_bcax)
+
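TRANS_FEAT is the existing glue macro from target/arm's translate headers; it expands to roughly this (paraphrased):

    #define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
        static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
        { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

so EOR3 and BCAX pick up their aa64_sha3 check for free, while SM3SS1 gets a hand-written trans function below because it only touches the top 32-bit element rather than the whole vector.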
+static bool trans_SM3SS1(DisasContext *s, arg_SM3SS1 *a)
+{
+ if (!dc_isar_feature(aa64_sm3, s)) {
+ return false;
+ }
+ if (fp_access_check(s)) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op3 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ unsigned vsz, dofs;
+
+ read_vec_element_i32(s, tcg_op1, a->rn, 3, MO_32);
+ read_vec_element_i32(s, tcg_op2, a->rm, 3, MO_32);
+ read_vec_element_i32(s, tcg_op3, a->ra, 3, MO_32);
+
+ tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
+ tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
+ tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
+ tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
+
+ /* Clear the whole register first, then store bits [127:96]. */
+ vsz = vec_full_reg_size(s);
+ dofs = vec_full_reg_offset(s, a->rd);
+ tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
+ write_vec_element_i32(s, tcg_res, a->rd, 3, MO_32);
+ }
+ return true;
+}

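As a review aid, a plain-C model of what the TCG sequence above computes on the top lanes (a hypothetical reference, not code from the patch). Note that tcg_gen_rotri_i32 by 20 and then by 25 is the same as rotate-left by 12 and then by 7, matching the SS1 step of the SM3 hash:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
        return (x << n) | (x >> (32 - n));   /* n is 7 or 12 here */
    }

    /* SM3SS1: combine the high 32-bit lanes of Vn, Vm, Va; the result
     * lands in lane 3 of Vd and every other bit of Vd is zeroed. */
    static uint32_t sm3ss1_ref(uint32_t n3, uint32_t m3, uint32_t a3)
    {
        return rol32(rol32(n3, 12) + m3 + a3, 7);
    }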
/* Shift a TCGv src by TCGv shift_amount, put result in dst.
* Note that it is the caller's responsibility to ensure that the
@@ -13533,94 +13576,6 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
}
}

-/* Crypto four-register
- *  31               23 22 21 20  16 15 14  10 9    5 4    0
- * +-------------------+-----+------+---+------+------+------+
- * | 1 1 0 0 1 1 1 0 0 | Op0 |  Rm  | 0 |  Ra  |  Rn  |  Rd  |
- * +-------------------+-----+------+---+------+------+------+
- */
-static void disas_crypto_four_reg(DisasContext *s, uint32_t insn)
-{
- int op0 = extract32(insn, 21, 2);
- int rm = extract32(insn, 16, 5);
- int ra = extract32(insn, 10, 5);
- int rn = extract32(insn, 5, 5);
- int rd = extract32(insn, 0, 5);
- bool feature;
-
- switch (op0) {
- case 0: /* EOR3 */
- case 1: /* BCAX */
- feature = dc_isar_feature(aa64_sha3, s);
- break;
- case 2: /* SM3SS1 */
- feature = dc_isar_feature(aa64_sm3, s);
- break;
- default:
- unallocated_encoding(s);
- return;
- }
-
- if (!feature) {
- unallocated_encoding(s);
- return;
- }
-
- if (!fp_access_check(s)) {
- return;
- }
-
- if (op0 < 2) {
- TCGv_i64 tcg_op1, tcg_op2, tcg_op3, tcg_res[2];
- int pass;
-
- tcg_op1 = tcg_temp_new_i64();
- tcg_op2 = tcg_temp_new_i64();
- tcg_op3 = tcg_temp_new_i64();
- tcg_res[0] = tcg_temp_new_i64();
- tcg_res[1] = tcg_temp_new_i64();
-
- for (pass = 0; pass < 2; pass++) {
- read_vec_element(s, tcg_op1, rn, pass, MO_64);
- read_vec_element(s, tcg_op2, rm, pass, MO_64);
- read_vec_element(s, tcg_op3, ra, pass, MO_64);
-
- if (op0 == 0) {
- /* EOR3 */
- tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op3);
- } else {
- /* BCAX */
- tcg_gen_andc_i64(tcg_res[pass], tcg_op2, tcg_op3);
- }
- tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
- }
- write_vec_element(s, tcg_res[0], rd, 0, MO_64);
- write_vec_element(s, tcg_res[1], rd, 1, MO_64);
- } else {
- TCGv_i32 tcg_op1, tcg_op2, tcg_op3, tcg_res, tcg_zero;
-
- tcg_op1 = tcg_temp_new_i32();
- tcg_op2 = tcg_temp_new_i32();
- tcg_op3 = tcg_temp_new_i32();
- tcg_res = tcg_temp_new_i32();
- tcg_zero = tcg_constant_i32(0);
-
- read_vec_element_i32(s, tcg_op1, rn, 3, MO_32);
- read_vec_element_i32(s, tcg_op2, rm, 3, MO_32);
- read_vec_element_i32(s, tcg_op3, ra, 3, MO_32);
-
- tcg_gen_rotri_i32(tcg_res, tcg_op1, 20);
- tcg_gen_add_i32(tcg_res, tcg_res, tcg_op2);
- tcg_gen_add_i32(tcg_res, tcg_res, tcg_op3);
- tcg_gen_rotri_i32(tcg_res, tcg_res, 25);
-
- write_vec_element_i32(s, tcg_zero, rd, 0, MO_32);
- write_vec_element_i32(s, tcg_zero, rd, 1, MO_32);
- write_vec_element_i32(s, tcg_zero, rd, 2, MO_32);
- write_vec_element_i32(s, tcg_res, rd, 3, MO_32);
- }
-}
-
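The removed loop states the per-lane semantics directly; as a reference (hypothetical helper names), on each 64-bit lane EOR3 computes n ^ m ^ a and BCAX computes n ^ (m & ~a), which is exactly what the gen_gvec_eor3/gen_gvec_bcax expanders used above now do across the whole vector:

    /* Per-lane reference for the two SHA3 ops, matching the deleted loop. */
    static inline uint64_t eor3_ref(uint64_t n, uint64_t m, uint64_t a)
    {
        return n ^ m ^ a;          /* EOR3: three-way XOR */
    }

    static inline uint64_t bcax_ref(uint64_t n, uint64_t m, uint64_t a)
    {
        return n ^ (m & ~a);       /* BCAX: bit clear and XOR */
    }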
/* Crypto XAR
 *  31                   21 20  16 15    10 9    5 4    0
* +-----------------------+------+--------+------+------+
@@ -13707,7 +13662,6 @@ static const AArch64DecodeTable data_proc_simd[] = {
{ 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
{ 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
{ 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
- { 0xce000000, 0xff808000, disas_crypto_four_reg },
{ 0xce800000, 0xffe00000, disas_crypto_xar },
{ 0xce408000, 0xffe0c000, disas_crypto_three_reg_imm2 },
{ 0x0e400400, 0x9f60c400, disas_simd_three_reg_same_fp16 },
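A hand-worked check that dropping the table entry loses nothing: the removed matcher accepted any insn with insn & 0xff808000 == 0xce000000, i.e. bits [31:24] = 0xce, bit 23 = 0 and bit 15 = 0, with op0 = insn[22:21] left floating (op0 == 3 was rejected at runtime via unallocated_encoding). The three decodetree patterns pin bits [23:21] to 000/001/010, so the op0 == 3 hole simply no longer matches anything and still decodes as unallocated. The EOR3 example worked out earlier satisfies the old matcher too: 0xce020c20 & 0xff808000 == 0xce000000.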