@@ -245,6 +245,9 @@ DEF(or_vec, 1, 2, 0, IMPLVEC)
DEF(xor_vec, 1, 2, 0, IMPLVEC)
DEF(andc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_andc_vec))
DEF(orc_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_orc_vec))
+DEF(nand_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))
+DEF(nor_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_nor_vec))
+DEF(eqv_vec, 1, 2, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_eqv_vec))
DEF(not_vec, 1, 1, 0, IMPLVEC | IMPL(TCG_TARGET_HAS_not_vec))
DEF(shli_vec, 1, 1, 1, IMPLVEC | IMPL(TCG_TARGET_HAS_shi_vec))
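For readers unfamiliar with tcg-opc.h, each DEF() row declares an opcode together with its argument counts and implementation flags. A commented copy of one of the new rows, assuming the usual DEF(name, output-args, input-args, constant-args, flags) ordering:

    /*  name       oargs  iargs  cargs  flags                                   */
    DEF(nand_vec,  1,     2,     0,     IMPLVEC | IMPL(TCG_TARGET_HAS_nand_vec))

All three new opcodes take two vector inputs, produce one vector output, and only count as implemented when the corresponding TCG_TARGET_HAS_*_vec macro is non-zero.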
@@ -183,6 +183,9 @@ typedef uint64_t TCGRegSet;
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_andc_vec 0
#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_roti_vec 0
#define TCG_TARGET_HAS_rots_vec 0
#define TCG_TARGET_HAS_rotv_vec 0
@@ -131,6 +131,9 @@ typedef enum {
#define TCG_TARGET_HAS_andc_vec 1
#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 1
#define TCG_TARGET_HAS_abs_vec 1
@@ -130,6 +130,9 @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_andc_vec 1
#define TCG_TARGET_HAS_orc_vec 1
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 1
#define TCG_TARGET_HAS_abs_vec 1
@@ -185,6 +185,9 @@ extern bool have_movbe;
#define TCG_TARGET_HAS_andc_vec 1
#define TCG_TARGET_HAS_orc_vec 0
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 0
#define TCG_TARGET_HAS_neg_vec 0
#define TCG_TARGET_HAS_abs_vec 1
@@ -162,6 +162,9 @@ extern bool have_vsx;
#define TCG_TARGET_HAS_andc_vec 1
#define TCG_TARGET_HAS_orc_vec have_isa_2_07
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec have_isa_3_00
#define TCG_TARGET_HAS_abs_vec 0
@@ -145,6 +145,9 @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_andc_vec 1
#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1)
+#define TCG_TARGET_HAS_nand_vec 0
+#define TCG_TARGET_HAS_nor_vec 0
+#define TCG_TARGET_HAS_eqv_vec 0
#define TCG_TARGET_HAS_not_vec 1
#define TCG_TARGET_HAS_neg_vec 1
#define TCG_TARGET_HAS_abs_vec 1
@@ -359,13 +359,13 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
    CASE_OP_32_64_VEC(orc):
        return x | ~y;

-    CASE_OP_32_64(eqv):
+    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

-    CASE_OP_32_64(nand):
+    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

-    CASE_OP_32_64(nor):
+    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
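Switching these cases from CASE_OP_32_64 to CASE_OP_32_64_VEC routes the new vector opcodes through the same folding rules as their scalar counterparts. A self-contained sketch (hypothetical helper names, not part of the patch) of what the three rules compute on 64-bit values:

    #include <assert.h>
    #include <stdint.h>

    /* Stand-ins for the three folding rules above. */
    static uint64_t nand64(uint64_t x, uint64_t y) { return ~(x & y); }
    static uint64_t nor64(uint64_t x, uint64_t y)  { return ~(x | y); }
    static uint64_t eqv64(uint64_t x, uint64_t y)  { return ~(x ^ y); }

    int main(void)
    {
        assert(nand64(0xff00, 0x0ff0) == ~UINT64_C(0x0f00));
        assert(nor64(0xff00, 0x0ff0)  == ~UINT64_C(0xfff0));
        assert(eqv64(0xff00, 0x0ff0)  == ~UINT64_C(0xf0f0));  /* eqv is the complement of xor */
        return 0;
    }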
@@ -2119,7 +2119,7 @@ void tcg_optimize(TCGContext *s)
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
-        CASE_OP_32_64(eqv):
+        CASE_OP_32_64_VEC(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
@@ -2170,13 +2170,13 @@ void tcg_optimize(TCGContext *s)
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
-        CASE_OP_32_64(nand):
+        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
-        CASE_OP_32_64(nor):
+        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
@@ -371,23 +371,32 @@ void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
-    /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend supports it. */
-    tcg_gen_and_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_nand_vec) {
+        vec_gen_op3(INDEX_op_nand_vec, 0, r, a, b);
+    } else {
+        tcg_gen_and_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
}

void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
-    /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend supports it. */
-    tcg_gen_or_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_nor_vec) {
+        vec_gen_op3(INDEX_op_nor_vec, 0, r, a, b);
+    } else {
+        tcg_gen_or_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
}

void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
-    /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend supports it. */
-    tcg_gen_xor_vec(0, r, a, b);
-    tcg_gen_not_vec(0, r, r);
+    if (TCG_TARGET_HAS_eqv_vec) {
+        vec_gen_op3(INDEX_op_eqv_vec, 0, r, a, b);
+    } else {
+        tcg_gen_xor_vec(0, r, a, b);
+        tcg_gen_not_vec(0, r, r);
+    }
}

static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
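Front ends keep calling the same generator functions; only the expansion changes. A usage sketch, assuming a translator that already holds TCGv_vec temporaries r, a and b of the same vector type:

    /* Emits a single nand_vec op when the backend advertises
     * TCG_TARGET_HAS_nand_vec, otherwise the and_vec + not_vec
     * fallback shown above.  The element-size argument is ignored
     * for these purely bitwise operations. */
    tcg_gen_nand_vec(MO_64, r, a, b);    /* r = ~(a & b) */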
@@ -1407,6 +1407,12 @@ bool tcg_op_supported(TCGOpcode op)
        return have_vec && TCG_TARGET_HAS_andc_vec;
    case INDEX_op_orc_vec:
        return have_vec && TCG_TARGET_HAS_orc_vec;
+    case INDEX_op_nand_vec:
+        return have_vec && TCG_TARGET_HAS_nand_vec;
+    case INDEX_op_nor_vec:
+        return have_vec && TCG_TARGET_HAS_nor_vec;
+    case INDEX_op_eqv_vec:
+        return have_vec && TCG_TARGET_HAS_eqv_vec;
    case INDEX_op_mul_vec:
        return have_vec && TCG_TARGET_HAS_mul_vec;
    case INDEX_op_shli_vec:
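With every TCG_TARGET_HAS_*_vec macro above still set to 0, code generation is unchanged by this patch; the new single-opcode path is taken only once a backend opts in. Roughly, that means flipping the macro in its tcg-target.h, reporting register constraints for the opcode, and emitting it in the vector-op handler. A minimal sketch, where OPC_VEC_NAND and tcg_out_vec_insn() are placeholders for whatever encoder the target actually provides:

    /* tcg-target.h: advertise the operation. */
    #define TCG_TARGET_HAS_nand_vec  1

    /* In the backend's vector-op emitter, next to the existing
     * and/or/andc/orc cases (names below are placeholders): */
    case INDEX_op_nand_vec:
        tcg_out_vec_insn(s, OPC_VEC_NAND, a0, a1, a2);
        break;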