@@ -180,6 +180,40 @@ uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
return result;
}
 
+/* Helper functions for the 64 bit polynomial multiply case:
+ * perform PolynomialMult(op1, op2) and return either the top or
+ * bottom half of the 128 bit result.
+ */
+uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
+{
+    int bitnum;
+    uint64_t res = 0;
+
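+    /* Carry-less multiply: each set bit of op1 XORs in op2 shifted
+     * left by that bit's position; no carries, so 0x3 * 0x3 = 0x5.
+     */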
+    for (bitnum = 0; bitnum < 64; bitnum++) {
+        if (op1 & (1ULL << bitnum)) {
+            res ^= op2 << bitnum;
+        }
+    }
+    return res;
+}
+
+uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
+{
+    int bitnum;
+    uint64_t res = 0;
+
+    /* bit 0 of op1 can't influence the high 64 bits at all */
+    for (bitnum = 1; bitnum < 64; bitnum++) {
+        if (op1 & (1ULL << bitnum)) {
+            res ^= op2 >> (64 - bitnum);
+        }
+    }
+    return res;
+}
+
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
@@ -27,6 +27,8 @@ DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32)
+DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
@@ -7124,6 +7124,13 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
tcg_passres, tcg_passres);
break;
+        case 14: /* PMULL */
+            assert(size == 0);
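+            /* 8 x 8 -> 16 bit polynomial multiply of four lanes at
+             * once, reusing the existing 32-bit Neon helper.
+             */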
+            gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
+            break;
default:
g_assert_not_reached();
}
@@ -7243,6 +7247,33 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
}
}
 
+static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
+{
+    /* PMULL of 64 x 64 -> 128 is an odd special case because it
+     * is the only three-reg-diff instruction which produces a
+     * 128-bit wide result from a single operation. However, since
+     * it's possible to calculate the two halves more or less
+     * separately, we just use two helper calls.
+     */
+    TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+    TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+    TCGv_i64 tcg_res = tcg_temp_new_i64();
+
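+    /* is_q doubles as the element index: 0 reads the low doublewords
+     * of Rn/Rm (PMULL), 1 the high doublewords (PMULL2).
+     */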
+    read_vec_element(s, tcg_op1, rn, is_q, MO_64);
+    read_vec_element(s, tcg_op2, rm, is_q, MO_64);
+    gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
+    write_vec_element(s, tcg_res, rd, 0, MO_64);
+    gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
+    write_vec_element(s, tcg_res, rd, 1, MO_64);
+
+    tcg_temp_free_i64(tcg_op1);
+    tcg_temp_free_i64(tcg_op2);
+    tcg_temp_free_i64(tcg_res);
+}
+
/* C3.6.15 AdvSIMD three different
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
* +---+---+---+-----------+------+---+------+--------+-----+------+------+
@@ -7293,8 +7321,15 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
-        unsupported_encoding(s, insn);
-        break;
+        if (size == 3) {
+            if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)) {
+                unallocated_encoding(s);
+                return;
+            }
+            handle_pmull_64(s, is_q, rd, rn, rm);
+            return;
+        }
+        goto is_widening;
case 9: /* SQDMLAL, SQDMLAL2 */
case 11: /* SQDMLSL, SQDMLSL2 */
case 13: /* SQDMULL, SQDMULL2 */
@@ -7315,6 +7350,7 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
return;
}
+    is_widening:
handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
break;
default:
@@ -9045,6 +9081,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->vec_stride = 0;
dc->cp_regs = cpu->cp_regs;
dc->current_pl = arm_current_pl(env);
+    dc->features = env->features;
 
init_tmp_a64_array(dc);
 
@@ -10654,6 +10654,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->cp_regs = cpu->cp_regs;
dc->current_pl = arm_current_pl(env);
+    dc->features = env->features;
 
cpu_F0s = tcg_temp_new_i32();
cpu_F1s = tcg_temp_new_i32();
@@ -26,6 +26,7 @@ typedef struct DisasContext {
int aarch64;
int current_pl;
GHashTable *cp_regs;
+    uint64_t features; /* CPU feature bits */
#define TMP_A64_MAX 16
int tmp_a64_count;
TCGv_i64 tmp_a64[TMP_A64_MAX];
@@ -33,6 +34,15 @@ typedef struct DisasContext {
 
extern TCGv_ptr cpu_env;
 
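+/* Check a cached CPU feature bit at translate time, e.g.
+ * arm_dc_feature(s, ARM_FEATURE_V8_AES), without needing to
+ * reach back into CPUARMState.
+ */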
+static inline int arm_dc_feature(DisasContext *dc, int feature)
+{
+    return (dc->features & (1ULL << feature)) != 0;
+}
+
/* target-specific extra values for is_jmp */
/* These instructions trap after executing, so the A32/T32 decoder must
* defer them until after the conditional execution state has been updated.