@@ -94,7 +94,7 @@ typedef enum {
 #define TCG_TARGET_HAS_mulsh_i32        0
 #define TCG_TARGET_HAS_extrl_i64_i32    0
 #define TCG_TARGET_HAS_extrh_i64_i32    0
-#define TCG_TARGET_HAS_negsetcond_i32   0
+#define TCG_TARGET_HAS_negsetcond_i32   1
 #define TCG_TARGET_HAS_qemu_st8_i32     0
 
 #define TCG_TARGET_HAS_div_i64          1
@@ -130,7 +130,7 @@ typedef enum {
 #define TCG_TARGET_HAS_muls2_i64        0
 #define TCG_TARGET_HAS_muluh_i64        1
 #define TCG_TARGET_HAS_mulsh_i64        1
-#define TCG_TARGET_HAS_negsetcond_i64   0
+#define TCG_TARGET_HAS_negsetcond_i64   1
 
 /*
  * Without FEAT_LSE2, we must use LDXP+STXP to implement atomic 128-bit load,
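The two header hunks above flip the TCG_TARGET_HAS_negsetcond_* flags to 1, so the
TCG middle-end emits the opcode directly instead of falling back to setcond plus
neg. As a rough C model, illustrative only and not part of the patch, negsetcond
produces the comparison result negated: 0 when false, -1 (all bits set) when true.

    /* Minimal sketch: a negsetcond-style result for a signed less-than. */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t negsetcond_lt(int64_t a, int64_t b)
    {
        return -(int64_t)(a < b);   /* 0 if false, -1 (a full mask) if true */
    }

    int main(void)
    {
        printf("%lld\n", (long long)negsetcond_lt(1, 2));  /* -1 */
        printf("%lld\n", (long long)negsetcond_lt(2, 1));  /* 0 */
        return 0;
    }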
@@ -2262,6 +2262,16 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                      TCG_REG_XZR, tcg_invert_cond(args[3]));
         break;
 
+    case INDEX_op_negsetcond_i32:
+        a2 = (int32_t)a2;
+        /* FALLTHRU */
+    case INDEX_op_negsetcond_i64:
+        tcg_out_cmp(s, ext, a1, a2, c2);
+        /* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond).  */
+        tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
+                     TCG_REG_XZR, tcg_invert_cond(args[3]));
+        break;
+
     case INDEX_op_movcond_i32:
         a2 = (int32_t)a2;
         /* FALLTHRU */
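On the emission above: CSINV Xd, Xn, Xm, cond selects Xn when cond holds and
NOT(Xm) otherwise, so with both sources wired to XZR and the condition inverted,
the destination becomes all ones exactly when the original condition is true.
A small C sketch of that selection, with hypothetical helper names:

    #include <stdint.h>
    #include <stdio.h>

    /* csinv: first source if the condition holds, else ~second source. */
    static uint64_t csinv(uint64_t xn, uint64_t xm, int cond_holds)
    {
        return cond_holds ? xn : ~xm;
    }

    /* CSETM Xd, cond is the alias CSINV Xd, XZR, XZR, invert(cond). */
    static uint64_t csetm(int cond_holds)
    {
        return csinv(0, 0, !cond_holds);
    }

    int main(void)
    {
        printf("0x%016llx\n", (unsigned long long)csetm(1));  /* all ones */
        printf("0x%016llx\n", (unsigned long long)csetm(0));  /* zero */
        return 0;
    }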
@@ -2868,6 +2878,8 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_sub_i64:
     case INDEX_op_setcond_i32:
     case INDEX_op_setcond_i64:
+    case INDEX_op_negsetcond_i32:
+    case INDEX_op_negsetcond_i64:
         return C_O1_I2(r, r, rA);
 
     case INDEX_op_mul_i32:
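The constraint hunk reuses the setcond set C_O1_I2(r, r, rA): one output register
and two inputs, where the second input may be a register or an arithmetic
immediate that tcg_out_cmp can encode directly in CMP, or in CMN when negated.
A sketch of that immediate test, assuming the usual AArch64 12-bit,
optionally-shifted add/sub immediate form; the helper name here is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True if val fits an AArch64 add/sub immediate: 12 bits, LSL #0 or #12. */
    static bool is_arith_imm(uint64_t val)
    {
        return (val & 0xfff) == val || (val & 0xfff000) == val;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_arith_imm(0xfff),     /* 1: plain 12-bit immediate */
               is_arith_imm(0xfff000),  /* 1: shifted by 12 */
               is_arith_imm(0x1001));   /* 0: not encodable */
        return 0;
    }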