diff --git a/target/arm/helper.h b/target/arm/helper.h
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -793,6 +793,20 @@ DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
diff --git a/target/arm/sve.decode b/target/arm/sve.decode
--- a/target/arm/sve.decode
+++ b/target/arm/sve.decode
@@ -785,6 +785,14 @@ SDOT_zzxw_d 01000100 .. 1 ..... 000000 ..... ..... @rrxr_d
UDOT_zzxw_s 01000100 .. 1 ..... 000001 ..... ..... @rrxr_s
UDOT_zzxw_d 01000100 .. 1 ..... 000001 ..... ..... @rrxr_d
+# SVE2 integer multiply-add (indexed)
+MLA_zzxz_h 01000100 .. 1 ..... 000010 ..... ..... @rrxr_h
+MLA_zzxz_s 01000100 .. 1 ..... 000010 ..... ..... @rrxr_s
+MLA_zzxz_d 01000100 .. 1 ..... 000010 ..... ..... @rrxr_d
+MLS_zzxz_h 01000100 .. 1 ..... 000011 ..... ..... @rrxr_h
+MLS_zzxz_s 01000100 .. 1 ..... 000011 ..... ..... @rrxr_s
+MLS_zzxz_d 01000100 .. 1 ..... 000011 ..... ..... @rrxr_d
+
# SVE2 integer multiply (indexed)
MUL_zzx_h 01000100 .. 1 ..... 111110 ..... ..... @rrx_h
MUL_zzx_s 01000100 .. 1 ..... 111110 ..... ..... @rrx_s
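
The new patterns sit in the same encoding neighborhood as the dot
products above: bits [15:10] select the operation (000010 for MLA,
000011 for MLS), while the @rrxr_* formats extract the registers and the
element index.  Architecturally, the index and Zm share the low
instruction bits, so narrower elements trade addressable Zm registers
for index range (Zm is limited to Z0-Z7 for the H/S forms).  A hedged
sketch of the word-sized field extraction, with field positions per the
Arm ARM; the real decode is generated by decodetree from @rrxr_s, and
these function names are illustrative only:

    /* MLA_zzxz_s: i2 lives in insn[20:19], Zm in insn[18:16]. */
    static unsigned mla_idx_s_index(uint32_t insn)
    {
        return (insn >> 19) & 3;    /* element index, 0..3 */
    }
    static unsigned mla_idx_s_zm(uint32_t insn)
    {
        return (insn >> 16) & 7;    /* only Z0-Z7 are encodable */
    }
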
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3870,6 +3870,29 @@ DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
#undef DO_SVE2_RRX
+static bool do_sve2_zzxz_ool(DisasContext *s, arg_rrxr_esz *a,
+                             gen_helper_gvec_4 *fn)
+{
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    return do_zzxz_ool(s, a, fn);
+}
+
+#define DO_SVE2_RRXR(NAME, FUNC) \
+    static bool NAME(DisasContext *s, arg_rrxr_esz *a)  \
+    { return do_sve2_zzxz_ool(s, a, FUNC); }
+
+DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
+DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
+DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
+
+DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
+DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
+DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
+
+#undef DO_SVE2_RRXR
+
/*
*** SVE Floating Point Multiply-Add Indexed Group
*/
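
For reference, DO_SVE2_RRXR only stamps out one-line trans functions;
the preprocessor turns, say, the halfword MLA case into:

    static bool trans_MLA_zzxz_h(DisasContext *s, arg_rrxr_esz *a)
    {
        return do_sve2_zzxz_ool(s, a, gen_helper_gvec_mla_idx_h);
    }

so every SVE2 indexed multiply-add first gates on the aa64_sve2 feature
bit and then defers to the pre-existing do_zzxz_ool out-of-line expander.
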
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -883,6 +883,31 @@ DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )
#undef DO_MUL_IDX
+#define DO_MLA_IDX(NAME, TYPE, OP, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc)  \
+{                                                                         \
+    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
+    intptr_t idx = simd_data(desc);                                       \
+    TYPE *d = vd, *n = vn, *m = vm, *a = va;                              \
+    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                 \
+        TYPE mm = m[H(i + idx)];                                          \
+        for (j = 0; j < segment; j++) {                                   \
+            d[i + j] = a[i + j] OP n[i + j] * mm;                         \
+        }                                                                 \
+    }                                                                     \
+    clear_tail(d, oprsz, simd_maxsz(desc));                               \
+}
+
+DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
+DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
+DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, )
+
+DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
+DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
+DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )
+
+#undef DO_MLA_IDX
+
#define DO_FMUL_IDX(NAME, TYPE, H)                                         \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
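
The helper walks the vector in 16-byte segments and reuses a single
element of the index operand per segment, which is what "indexed" means
here: the selected multiplier is splatted across each 128-bit chunk.  A
minimal standalone model of the 32-bit case (hypothetical names, host
byte order, no H4 swizzle or tail clearing):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* d[i] = a[i] + n[i] * m[seg_base + idx] per 16-byte segment. */
    static void ref_mla_idx_s(uint32_t *d, const uint32_t *n,
                              const uint32_t *m, const uint32_t *a,
                              unsigned elems, unsigned idx)
    {
        unsigned seg = 16 / sizeof(uint32_t);   /* 4 elements/segment */
        for (unsigned i = 0; i < elems; i += seg) {
            uint32_t mm = m[i + idx];           /* one multiplier/segment */
            for (unsigned j = 0; j < seg; j++) {
                d[i + j] = a[i + j] + n[i + j] * mm;
            }
        }
    }

    int main(void)
    {
        uint32_t n[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        uint32_t m[8] = {10, 20, 30, 40, 50, 60, 70, 80};
        uint32_t a[8] = {0};
        uint32_t d[8];

        ref_mla_idx_s(d, n, m, a, 8, 1);
        for (int i = 0; i < 8; i++) {
            /* prints: 20 40 60 80 300 360 420 480 */
            printf("%" PRIu32 " ", d[i]);
        }
        printf("\n");
        return 0;
    }

Note the second segment multiplies by m[5], not m[1]: the index is
relative to each 128-bit segment, matching m[H(i + idx)] in the helper.
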
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h        | 14 ++++++++++++++
 target/arm/sve.decode      |  8 ++++++++
 target/arm/translate-sve.c | 23 +++++++++++++++++++++++
 target/arm/vec_helper.c    | 25 +++++++++++++++++++++++++
 4 files changed, 70 insertions(+)

-- 
2.25.1