@@ -112,7 +112,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_muls2_i32 1
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 0
+#define TCG_TARGET_HAS_negsetcond_i32 1
#define TCG_TARGET_HAS_qemu_st8_i32 0
#define TCG_TARGET_HAS_extrl_i64_i32 1
@@ -150,7 +150,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_muls2_i64 0
#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
#define TCG_TARGET_HAS_mulsh_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 0
+#define TCG_TARGET_HAS_negsetcond_i64 1
#define TCG_TARGET_HAS_qemu_ldst_i128 0
@@ -720,7 +720,7 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const)
+ TCGReg c1, int32_t c2, int c2const, bool neg)
{
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
@@ -760,22 +760,30 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
default:
tcg_out_cmp(s, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
+ tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
return;
}

tcg_out_cmp(s, c1, c2, c2const);
if (cond == TCG_COND_LTU) {
- tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
+ if (neg) {
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_SUBC);
+ } else {
+ tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
+ }
} else {
- tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
+ if (neg) {
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_ADDC);
+ } else {
+ tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
+ }
}
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const)
+ TCGReg c1, int32_t c2, int c2const, bool neg)
{
- if (use_vis3_instructions) {
+ if (use_vis3_instructions && !neg) {
switch (cond) {
case TCG_COND_NE:
if (c2 != 0) {
@@ -796,11 +804,11 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
if the input does not overlap the output. */
if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movr(s, cond, ret, c1, 1, 1);
+ tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
} else {
tcg_out_cmp(s, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
+ tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
}
}
@@ -1355,7 +1363,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
case INDEX_op_setcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
+ break;
+ case INDEX_op_negsetcond_i32:
+ tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
break;
case INDEX_op_movcond_i32:
tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
@@ -1437,7 +1448,10 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
break;
case INDEX_op_setcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
+ break;
+ case INDEX_op_negsetcond_i64:
+ tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
break;
case INDEX_op_movcond_i64:
tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
@@ -1564,6 +1578,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sar_i64:
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i32:
+ case INDEX_op_negsetcond_i64:
return C_O1_I2(r, rZ, rJ);
case INDEX_op_brcond_i32:
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/sparc64/tcg-target.h     |  4 ++--
 tcg/sparc64/tcg-target.c.inc | 36 ++++++++++++++++++++++++++----------
 2 files changed, 28 insertions(+), 12 deletions(-)
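
A note on the ADDC/SUBC path in tcg_out_setcond_i32: after tcg_out_cmp emits a subcc, the SPARC carry flag is set exactly when c1 < c2 as unsigned values, so both the 0/1 setcond result and the 0/-1 negsetcond result fall out of a single add- or subtract-with-carry against %g0. The GEU case is the mirror image, biased by -1 instead of 0, which is why ADDC and SUBC swap roles when neg is set. The sketch below is a standalone model of what those sequences compute; it is not part of the patch, and the model_* helpers are names invented purely for illustration.

#include <assert.h>
#include <stdint.h>

/*
 * Model of the carry-based sequences emitted above.  After the compare,
 * "c" stands for the SPARC carry flag: set iff c1 < c2 (unsigned).
 * These helpers are illustrative only; they do not exist in QEMU.
 */
static int32_t model_setcond_ltu(uint32_t c1, uint32_t c2, int neg)
{
    uint32_t c = (c1 < c2);                     /* carry after subcc */

    if (neg) {
        return (int32_t)(0u - 0u - c);          /* subc %g0, 0  -> 0 or -1 */
    }
    return (int32_t)(0u + 0u + c);              /* addc %g0, 0  -> 0 or 1  */
}

static int32_t model_setcond_geu(uint32_t c1, uint32_t c2, int neg)
{
    uint32_t c = (c1 < c2);                     /* carry after subcc */

    if (neg) {
        return (int32_t)(0u + (uint32_t)-1 + c);   /* addc %g0, -1 -> -1 or 0 */
    }
    return (int32_t)(0u - (uint32_t)-1 - c);       /* subc %g0, -1 -> 1 or 0  */
}

int main(void)
{
    assert(model_setcond_ltu(1, 2, 0) == 1);
    assert(model_setcond_ltu(1, 2, 1) == -1);
    assert(model_setcond_ltu(2, 1, 1) == 0);
    assert(model_setcond_geu(2, 1, 0) == 1);
    assert(model_setcond_geu(2, 1, 1) == -1);
    assert(model_setcond_geu(1, 2, 1) == 0);
    return 0;
}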