@@ -789,6 +789,10 @@ DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
#ifdef TARGET_AARCH64
#include "helper-a64.h"
#include "helper-sve.h"
@@ -777,12 +777,19 @@ MUL_zzi 00100101 .. 110 000 110 ........ ..... @rdn_i8s
DOT_zzzz 01000100 1 sz:1 0 rm:5 00000 u:1 rn:5 rd:5 \
ra=%reg_movprfx
+#### SVE Multiply - Indexed
+
# SVE integer dot product (indexed)
SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_s
SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_d
UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_s
UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_d
+# SVE2 integer multiply (indexed)
+MUL_zzx_h 01000100 0. 1 ..... 111110 ..... ..... @rrx_h
+MUL_zzx_s 01000100 10 1 ..... 111110 ..... ..... @rrx_s
+MUL_zzx_d 01000100 11 1 ..... 111110 ..... ..... @rrx_d
+
# SVE floating-point complex add (predicated)
FCADD 01100100 esz:2 00000 rot:1 100 pg:3 rm:5 rd:5 \
rn=%reg_movprfx
@@ -3817,6 +3817,10 @@ static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a)
return true;
}
+/*
+ * SVE Multiply - Indexed
+ */
+
static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a,
gen_helper_gvec_4 *fn)
{
@@ -3840,6 +3844,34 @@ DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)
#undef DO_RRXR
+static bool do_sve2_zzx_ool(DisasContext *s, arg_rrx_esz *a,
+ gen_helper_gvec_3 *fn)
+{
+ if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
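+        /* a->index is passed to the helper via the simd_data field of desc. */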
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vsz, vsz, a->index, fn);
+ }
+ return true;
+}
+
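+/* Expand trans_* functions for the SVE2 integer multiply (indexed) insns. */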
+#define DO_SVE2_RRX(NAME, FUNC) \
+ static bool NAME(DisasContext *s, arg_rrx_esz *a) \
+ { return do_sve2_zzx_ool(s, a, FUNC); }
+
+DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
+DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
+DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)
+
+#undef DO_SVE2_RRX
+
/*
*** SVE Floating Point Multiply-Add Indexed Group
*/
@@ -863,6 +863,30 @@ DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)
*/
#define DO_MUL_IDX(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
+ intptr_t idx = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
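+    /* Each 128-bit segment reads its own indexed element of m and       \
+     * multiplies it into every lane of n within that segment. */        \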
+ for (i = 0; i < oprsz / sizeof(TYPE); i += segment) { \
+ TYPE mm = m[H(i + idx)]; \
+ for (j = 0; j < segment; j++) { \
+ d[i + j] = n[i + j] * mm; \
+ } \
+ } \
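+    /* Zero any tail bytes from oprsz up to the maximum vector size. */  \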
+ clear_tail(d, oprsz, simd_maxsz(desc)); \
+}
+
+DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2)
+DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4)
+DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )
+
+#undef DO_MUL_IDX
+
+#define DO_FMUL_IDX(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{ \
intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE); \
@@ -877,11 +901,11 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
-DO_MUL_IDX(gvec_fmul_idx_h, float16, H2)
-DO_MUL_IDX(gvec_fmul_idx_s, float32, H4)
-DO_MUL_IDX(gvec_fmul_idx_d, float64, )
+DO_FMUL_IDX(gvec_fmul_idx_h, float16, H2)
+DO_FMUL_IDX(gvec_fmul_idx_s, float32, H4)
+DO_FMUL_IDX(gvec_fmul_idx_d, float64, )
-#undef DO_MUL_IDX
+#undef DO_FMUL_IDX
#define DO_FMLA_IDX(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h        |  4 ++++
 target/arm/sve.decode      |  7 +++++++
 target/arm/translate-sve.c | 32 ++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    | 32 ++++++++++++++++++++++++++++----
 4 files changed, 71 insertions(+), 4 deletions(-)

-- 
2.25.1