@@ -1130,3 +1130,12 @@ DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(vrgather_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(vrgather_vx_d, void, ptr, ptr, tl, ptr, env, i32)
@@ -572,6 +572,9 @@ vslide1up_vx 001110 . ..... ..... 110 ..... 1010111 @r_vm
vslidedown_vx 001111 . ..... ..... 100 ..... 1010111 @r_vm
vslidedown_vi 001111 . ..... ..... 011 ..... 1010111 @r_vm
vslide1down_vx 001111 . ..... ..... 110 ..... 1010111 @r_vm
+vrgather_vv 001100 . ..... ..... 000 ..... 1010111 @r_vm
+vrgather_vx 001100 . ..... ..... 100 ..... 1010111 @r_vm
+vrgather_vi 001100 . ..... ..... 011 ..... 1010111 @r_vm
vsetvli 0 ........... ..... 111 ..... 1010111 @r2_zimm
vsetvl 1000000 ..... ..... 111 ..... 1010111 @r
@@ -2772,3 +2772,81 @@ GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)
GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)
+
+/* Vector Register Gather Instructions */
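+/*
+ * Per the spec, vd may not overlap vs1 or vs2: the gather reads source
+ * elements in arbitrary order, so an in-place update could consume
+ * already-overwritten data.
+ */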
+static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs1, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2) && (a->rd != a->rs1));
+}
+
+GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
+
+static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
+{
+ return (vext_check_isa_ill(s) &&
+ vext_check_overlap_mask(s, a->rd, a->vm, true) &&
+ vext_check_reg(s, a->rd, false) &&
+ vext_check_reg(s, a->rs2, false) &&
+ (a->rd != a->rs2));
+}
+
+/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
+static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
+{
+ if (!vrgather_vx_check(s, a)) {
+ return false;
+ }
+
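+    /*
+     * The scalar index is the same for every element, so an unmasked
+     * gather at vl == vlmax reduces to splatting one element of vs2.
+     */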
+ if (a->vm && s->vl_eq_vlmax) {
+ int vlmax = s->vlen / s->mlen;
+ TCGv_i64 dest = tcg_temp_new_i64();
+
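+        /* rs1 == 0 is x0, whose value is always 0: gather element 0 */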
+ if (a->rs1 == 0) {
+ vec_element_loadi(s, dest, a->rs2, 0);
+ } else {
+ vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
+ }
+
+ tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), dest);
+ tcg_temp_free_i64(dest);
+ } else {
+ static gen_helper_opivx * const fns[4] = {
+ gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+ gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
+ };
+ return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
+ }
+ return true;
+}
+
+/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
+static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
+{
+ if (!vrgather_vx_check(s, a)) {
+ return false;
+ }
+
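+    /*
+     * The immediate index is known at translate time, so either splat
+     * zeroes (index out of range) or dup the selected element of vs2.
+     */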
+ if (a->vm && s->vl_eq_vlmax) {
+ if (a->rs1 >= s->vlen / s->mlen) {
+ tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
+ MAXSZ(s), MAXSZ(s), 0);
+ } else {
+ tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
+ endian_ofs(s, a->rs2, a->rs1),
+ MAXSZ(s), MAXSZ(s));
+ }
+ } else {
+ static gen_helper_opivx * const fns[4] = {
+ gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
+ gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
+ };
+ return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
+ }
+ return true;
+}
@@ -4810,3 +4810,63 @@ GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b, uint8_t, H1, clearb)
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h, uint16_t, H2, clearh)
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w, uint32_t, H4, clearl)
GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d, uint64_t, H8, clearq)
+
+/* Vector Register Gather Instructions */
+#define GEN_VEXT_VRGATHER_VV(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+    uint64_t index; /* must hold a full SEW-wide vs1 element */           \
+    uint32_t i;                                                           \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ index = *((ETYPE *)vs1 + H(i)); \
+ if (index >= vlmax) { \
+ *((ETYPE *)vd + H(i)) = 0; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
+ } \
+ } \
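+    /* zero the tail elements (vl..vlmax-1) */                            \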
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
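+/*
+ * e.g. with VLMAX = 4, vs2 = {10, 20, 30, 40} and vs1 = {3, 0, 7, 1}:
+ * vd = {40, 10, 0, 20} (index 7 >= VLMAX, so that element becomes 0)
+ */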
+GEN_VEXT_VRGATHER_VV(vrgather_vv_b, uint8_t, H1, clearb)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_h, uint16_t, H2, clearh)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_w, uint32_t, H4, clearl)
+GEN_VEXT_VRGATHER_VV(vrgather_vv_d, uint64_t, H8, clearq)
+
+#define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H, CLEAR_FN) \
+void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
+ CPURISCVState *env, uint32_t desc) \
+{ \
+ uint32_t mlen = vext_mlen(desc); \
+ uint32_t vlmax = env_archcpu(env)->cfg.vlen / mlen; \
+ uint32_t vm = vext_vm(desc); \
+ uint32_t vl = env->vl; \
+    uint64_t index = s1; /* a wide x[rs1] must not wrap below vlmax */    \
+    uint32_t i;                                                           \
+ \
+ for (i = 0; i < vl; i++) { \
+ if (!vm && !vext_elem_mask(v0, mlen, i)) { \
+ continue; \
+ } \
+ if (index >= vlmax) { \
+ *((ETYPE *)vd + H(i)) = 0; \
+ } else { \
+ *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
+ } \
+ } \
+ CLEAR_FN(vd, vl, vl * sizeof(ETYPE), vlmax * sizeof(ETYPE)); \
+}
+
+/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
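+/*
+ * e.g. with VLMAX = 4, x[rs1] = 2 and vs2 = {10, 20, 30, 40}:
+ * every active element of vd becomes vs2[2] = 30
+ */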
+GEN_VEXT_VRGATHER_VX(vrgather_vx_b, uint8_t, H1, clearb)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_h, uint16_t, H2, clearh)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_w, uint32_t, H4, clearl)
+GEN_VEXT_VRGATHER_VX(vrgather_vx_d, uint64_t, H8, clearq)