@@ -110,6 +110,7 @@ DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
DEF_HELPER_FLAGS_2(cmask8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(cmask16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
DEF_HELPER_FLAGS_2(cmask32, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fchksm16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
#define VIS_CMPHELPER(name) \
    DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
                       i64, i64, i64) \
@@ -410,6 +410,7 @@ FCMPEq 10 000 cc:2 110101 ..... 0 0101 0111 ..... \
PDIST 10 ..... 110110 ..... 0 0011 1110 ..... \
            &r_r_r_r rd=%dfp_rd rs1=%dfp_rd rs2=%dfp_rs1 rs3=%dfp_rs2
+FCHKSM16 10 ..... 110110 ..... 0 0100 0100 ..... @d_d_d
FALIGNDATAg 10 ..... 110110 ..... 0 0100 1000 ..... @d_d_d
FPMERGE 10 ..... 110110 ..... 0 0100 1011 ..... @d_r_r
BSHUFFLE 10 ..... 110110 ..... 0 0100 1100 ..... @d_d_d
@@ -788,6 +788,37 @@ static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
    tcg_gen_concat_i32_i64(dst, t0, t1);
}
+#ifdef TARGET_SPARC64
+static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
+                             TCGv_vec src1, TCGv_vec src2)
+{
+    TCGv_vec a = tcg_temp_new_vec_matching(dst);
+    TCGv_vec c = tcg_temp_new_vec_matching(dst);
+
+    tcg_gen_add_vec(vece, a, src1, src2);
+    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
+    /* Vector cmp produces -1 for true, so subtract to add the carry. */
+    tcg_gen_sub_vec(vece, dst, a, c);
+}
+
+static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
+                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
+    };
+    static const GVecGen3 op = {
+        .fni8 = gen_helper_fchksm16,
+        .fniv = gen_vec_fchksm16,
+        .opt_opc = vecop_list,
+        .vece = MO_16,
+    };
+    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
+}
+#else
+#define gen_op_fchksm16 ({ qemu_build_not_reached(); NULL; })
+#endif
+
static void finishing_insn(DisasContext *dc)
{
    /*
@@ -4761,6 +4792,7 @@ TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)
+TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)

static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
@@ -389,3 +389,26 @@ uint64_t helper_cmask32(uint64_t gsr, uint64_t src)
    return deposit64(gsr, 32, 32, mask);
}
+
+static inline uint16_t do_fchksm16(uint16_t src1, uint16_t src2)
+{
+    uint16_t a = src1 + src2;
+    uint16_t c = a < src1;
+    return a + c;
+}
+
+uint64_t helper_fchksm16(uint64_t src1, uint64_t src2)
+{
+    VIS64 r, s1, s2;
+
+    s1.ll = src1;
+    s2.ll = src2;
+    r.ll = 0;
+
+    r.VIS_W64(0) = do_fchksm16(s1.VIS_W64(0), s2.VIS_W64(0));
+    r.VIS_W64(1) = do_fchksm16(s1.VIS_W64(1), s2.VIS_W64(1));
+    r.VIS_W64(2) = do_fchksm16(s1.VIS_W64(2), s2.VIS_W64(2));
+    r.VIS_W64(3) = do_fchksm16(s1.VIS_W64(3), s2.VIS_W64(3));
+
+    return r.ll;
+}