--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -60,6 +60,7 @@ typedef enum {

extern bool have_isa_altivec;
extern bool have_isa_2_06;
+extern bool have_isa_2_06_vsx;
extern bool have_isa_3_00;

/* optional instructions automatically implemented */
@@ -141,7 +142,7 @@ extern bool have_isa_3_00;
* instruction and substituting two 32-bit stores makes the generated
* code quite large.
*/
-#define TCG_TARGET_HAS_v64 0
+#define TCG_TARGET_HAS_v64 have_isa_2_06_vsx
#define TCG_TARGET_HAS_v128 have_isa_altivec
#define TCG_TARGET_HAS_v256 0

@@ -157,7 +158,7 @@ extern bool have_isa_3_00;
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_HAS_sat_vec 1
#define TCG_TARGET_HAS_minmax_vec 1
-#define TCG_TARGET_HAS_bitsel_vec 0
+#define TCG_TARGET_HAS_bitsel_vec have_isa_2_06_vsx
#define TCG_TARGET_HAS_cmpsel_vec 0

void flush_icache_range(uintptr_t start, uintptr_t stop);
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -66,6 +66,7 @@ static tcg_insn_unit *tb_ret_addr;

bool have_isa_altivec;
bool have_isa_2_06;
+bool have_isa_2_06_vsx;
bool have_isa_3_00;

#define HAVE_ISA_2_06 have_isa_2_06
@@ -470,9 +471,12 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
#define LVEBX XO31(7)
#define LVEHX XO31(39)
#define LVEWX XO31(71)
+#define LXSDX XO31(588) /* v2.06 */
+#define LXVDSX XO31(332) /* v2.06 */

#define STVX XO31(231)
#define STVEWX XO31(199)
+#define STXSDX XO31(716) /* v2.06 */

#define VADDSBS VX4(768)
#define VADDUBS VX4(512)
@@ -561,6 +565,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type,
#define VSLDOI VX4(44)

+#define XXPERMDI (OPCD(60) | (10 << 3)) /* v2.06 */
+#define XXSEL (OPCD(60) | (3 << 4)) /* v2.06 */
+
#define RT(r) ((r)<<21)
#define RS(r) ((r)<<21)
#define RA(r) ((r)<<16)

@@ -887,11 +894,21 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret,
add = 0;
}

- load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
- if (TCG_TARGET_REG_BITS == 64) {
- new_pool_l2(s, rel, s->code_ptr, add, val, val);
+ if (have_isa_2_06_vsx) {
+ load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
+ load_insn |= VRT(ret) | RB(TCG_REG_TMP1) | 1;
+ if (TCG_TARGET_REG_BITS == 64) {
+ new_pool_label(s, val, rel, s->code_ptr, add);
+ } else {
+ new_pool_l2(s, rel, s->code_ptr, add, val, val);
+ }
} else {
- new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
+ load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ new_pool_l2(s, rel, s->code_ptr, add, val, val);
+ } else {
+ new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val);
+ }
}

if (USE_REG_TB) {
@@ -1138,6 +1155,10 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
/* fallthru */
case TCG_TYPE_V64:
tcg_debug_assert(ret >= 32);
+ if (have_isa_2_06_vsx) {
+ tcg_out_mem_long(s, 0, LXSDX | 1, ret & 31, base, offset);
+ break;
+ }
assert((offset & 7) == 0);
tcg_out_mem_long(s, 0, LVX, ret & 31, base, offset & -16);
if (offset & 8) {
@@ -1181,6 +1202,10 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
/* fallthru */
case TCG_TYPE_V64:
tcg_debug_assert(arg >= 32);
+ if (have_isa_2_06_vsx) {
+ tcg_out_mem_long(s, 0, STXSDX | 1, arg & 31, base, offset);
+ break;
+ }
assert((offset & 7) == 0);
if (offset & 8) {
tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
@@ -2916,6 +2941,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
case INDEX_op_shri_vec:
case INDEX_op_sari_vec:
return vece <= MO_32 ? -1 : 0;
+ case INDEX_op_bitsel_vec:
+ return have_isa_2_06_vsx;
default:
return 0;
}
@@ -2942,6 +2969,10 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
break;
case MO_64:
+ if (have_isa_2_06_vsx) {
+ tcg_out32(s, XXPERMDI | 7 | VRT(dst) | VRA(src) | VRB(src));
+ break;
+ }
tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
break;
@@ -2986,6 +3017,10 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
break;
case MO_64:
+ if (have_isa_2_06_vsx) {
+ tcg_out_mem_long(s, 0, LXVDSX | 1, out, base, offset);
+ break;
+ }
assert((offset & 7) == 0);
tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
@@ -3120,6 +3155,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
}
break;

+ case INDEX_op_bitsel_vec:
+ tcg_out32(s, XXSEL | 0xf | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
+ return;
+
case INDEX_op_dup2_vec:
assert(TCG_TARGET_REG_BITS == 32);
/* With inputs a1 = xLxx, a2 = xHxx */
@@ -3515,6 +3554,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
case INDEX_op_st_vec:
case INDEX_op_dupm_vec:
return &v_r;

+ case INDEX_op_bitsel_vec:
case INDEX_op_ppc_msum_vec:
return &v_v_v_v;
@@ -3533,6 +3573,9 @@ static void tcg_target_init(TCGContext *s)
}
if (hwcap & PPC_FEATURE_ARCH_2_06) {
have_isa_2_06 = true;
+ if (hwcap & PPC_FEATURE_HAS_VSX) {
+ have_isa_2_06_vsx = true;
+ }
}
#ifdef PPC_FEATURE2_ARCH_3_00
if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
This includes double-word loads and stores, the double-word load and
splat, the double-word permute, and bit select, all of which require
multiple operations in the base Altivec instruction set.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/ppc/tcg-target.h     |  5 ++--
 tcg/ppc/tcg-target.inc.c | 51 ++++++++++++++++++++++++++++++++++++----
 2 files changed, 50 insertions(+), 6 deletions(-)

-- 
2.17.1