@@ -735,6 +735,9 @@ void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg);
void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg);
void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg);
+void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg);
+void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi);
+
static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
{
tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
@@ -158,6 +158,10 @@ DEF(extrh_i64_i32, 1, 1, 0,
IMPL(TCG_TARGET_HAS_extrh_i64_i32)
| (TCG_TARGET_REG_BITS == 32 ? TCG_OPF_NOT_PRESENT : 0))
+/* For 32-bit host only, implemented generically using ld/st/mov. */
+DEF(extr_i128_i32, 1, 1, 1, TCG_OPF_NOT_PRESENT)
+DEF(concat4_i32_i128, 1, 4, 0, TCG_OPF_NOT_PRESENT)
+
DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_COND_BRANCH | IMPL64)
DEF(ext8s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext8s_i64))
DEF(ext16s_i64, 1, 1, 0, IMPL64 | IMPL(TCG_TARGET_HAS_ext16s_i64))
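As a reading aid for the two DEF() entries above, the operand counts are
(outputs, inputs, constant args): extr_i128_i32 consumes the i128 plus a
constant byte offset and produces one i32 word; concat4_i32_i128 consumes
four i32 words and produces the i128. A minimal sketch of what
extr_i128_i32 computes, in plain C (illustration only, not part of the
patch; the function name is made up):

#include <stdint.h>
#include <string.h>

/* Extract one 32-bit word from a 16-byte in-memory i128 slot.
 * byte_ofs is the opcode's constant argument: 0, 4, 8 or 12. */
static uint32_t example_extr_i128_i32(const void *slot16, unsigned byte_ofs)
{
    uint32_t w;
    memcpy(&w, (const char *)slot16 + byte_ofs, sizeof(w));
    return w;
}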
@@ -110,9 +110,21 @@ static inline TCGv_i32 TCGV_HIGH(TCGv_i64 t)
{
return temp_tcgv_i32(tcgv_i64_temp(t) + !HOST_BIG_ENDIAN);
}
+extern TCGv_i64 TCGV128_LOW(TCGv_i128)
+ QEMU_ERROR("64-bit code path is reachable");
+extern TCGv_i64 TCGV128_HIGH(TCGv_i128)
+ QEMU_ERROR("64-bit code path is reachable");
#else
extern TCGv_i32 TCGV_LOW(TCGv_i64) QEMU_ERROR("32-bit code path is reachable");
extern TCGv_i32 TCGV_HIGH(TCGv_i64) QEMU_ERROR("32-bit code path is reachable");
+static inline TCGv_i64 TCGV128_LOW(TCGv_i128 t)
+{
+ return temp_tcgv_i64(tcgv_i128_temp(t) + HOST_BIG_ENDIAN);
+}
+static inline TCGv_i64 TCGV128_HIGH(TCGv_i128 t)
+{
+ return temp_tcgv_i64(tcgv_i128_temp(t) + !HOST_BIG_ENDIAN);
+}
#endif
#endif /* TCG_INTERNAL_H */
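The TCGV128_LOW()/TCGV128_HIGH() accessors above assume an I128 temp is
allocated as two consecutive I64 temps, with host endianness selecting
which of the pair holds the low 64 bits; this mirrors the existing
TCGV_LOW()/TCGV_HIGH() scheme for I64 on 32-bit hosts. The indexing,
reduced to plain integers (illustration only, with the endianness fixed
by assumption for the example):

#include <stdio.h>

#define HOST_BIG_ENDIAN 0   /* assumption for this example */

int main(void)
{
    int base = 0;                        /* index of the i128's first temp */
    int low  = base + HOST_BIG_ENDIAN;   /* as in TCGV128_LOW()  */
    int high = base + !HOST_BIG_ENDIAN;  /* as in TCGV128_HIGH() */
    printf("low half at temp +%d, high half at temp +%d\n", low, high);
    return 0;
}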
@@ -2718,6 +2718,41 @@ void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg)
tcg_gen_shri_i64(hi, arg, 32);
}
+void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ TCGArg a_arg = tcgv_i128_arg(arg);
+ int be = HOST_BIG_ENDIAN ? 0xc : 0;
+
+ tcg_gen_op3(INDEX_op_extr_i128_i32, tcgv_i32_arg(TCGV_LOW(lo)),
+ a_arg, 0x0 ^ be);
+ tcg_gen_op3(INDEX_op_extr_i128_i32, tcgv_i32_arg(TCGV_HIGH(lo)),
+ a_arg, 0x4 ^ be);
+ tcg_gen_op3(INDEX_op_extr_i128_i32, tcgv_i32_arg(TCGV_LOW(hi)),
+ a_arg, 0x8 ^ be);
+ tcg_gen_op3(INDEX_op_extr_i128_i32, tcgv_i32_arg(TCGV_HIGH(hi)),
+ a_arg, 0xc ^ be);
+ } else {
+ tcg_gen_mov_i64(lo, TCGV128_LOW(arg));
+ tcg_gen_mov_i64(hi, TCGV128_HIGH(arg));
+ }
+}
+
+void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi)
+{
+ if (TCG_TARGET_REG_BITS == 32) {
+ tcg_gen_op5(INDEX_op_concat4_i32_i128,
+ tcgv_i128_arg(ret),
+ tcgv_i32_arg(TCGV_LOW(lo)),
+ tcgv_i32_arg(TCGV_HIGH(lo)),
+ tcgv_i32_arg(TCGV_LOW(hi)),
+ tcgv_i32_arg(TCGV_HIGH(hi)));
+ } else {
+ tcg_gen_mov_i64(TCGV128_LOW(ret), lo);
+ tcg_gen_mov_i64(TCGV128_HIGH(ret), hi);
+ }
+}
+
/* QEMU specific operations. */
void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx)
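In the 32-bit expansion of tcg_gen_extr_i128_i64() above, the word
offsets 0x0/0x4/0x8/0xc are XOR-ed with 0xc on a big-endian host, which
reverses the order of the four 32-bit words within the 16-byte slot.
A standalone check of that identity (illustration only):

#include <stdio.h>

int main(void)
{
    for (int be = 0; be <= 0xc; be += 0xc) {
        printf("be=0x%x:", be);
        for (int ofs = 0; ofs < 16; ofs += 4) {
            printf("  0x%x->0x%x", ofs, ofs ^ be);
        }
        printf("\n");
    }
    /* be=0x0 leaves the offsets unchanged; be=0xc maps
     * 0x0->0xc, 0x4->0x8, 0x8->0x4, 0xc->0x0. */
    return 0;
}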
@@ -3949,6 +3949,85 @@ static void tcg_reg_alloc_mov(TCGContext *s, const TCGOp *op)
}
}
+/*
+ * Specialized code generation for TCG_TYPE_I128 on 32-bit host.
+ * Here, 128-bit values are *always* in memory, never regs or constants.
+ * Move 32-bit pieces into and out of the 128-bit memory slot.
+ */
+static void tcg_reg_alloc_extr_i128_i32(TCGContext *s, const TCGOp *op)
+{
+ const TCGLifeData arg_life = op->life;
+ TCGTemp *ots = arg_temp(op->args[0]);
+ TCGTemp *its = arg_temp(op->args[1]);
+ TCGArg ofs = op->args[2];
+ TCGReg reg;
+
+ assert(TCG_TARGET_REG_BITS == 32);
+ tcg_debug_assert(ots->type == TCG_TYPE_I32);
+ tcg_debug_assert(!temp_readonly(ots));
+ tcg_debug_assert(its->type == TCG_TYPE_I128);
+ tcg_debug_assert(its->val_type == TEMP_VAL_MEM);
+ tcg_debug_assert(ofs < 16);
+ tcg_debug_assert((ofs & 3) == 0);
+
+ if (ots->val_type == TEMP_VAL_REG) {
+ reg = ots->reg;
+ } else {
+ reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_I32],
+ s->reserved_regs, op->output_pref[0],
+ ots->indirect_base);
+ ots->val_type = TEMP_VAL_REG;
+ ots->reg = reg;
+ s->reg_to_temp[reg] = ots;
+ }
+
+ tcg_out_ld(s, TCG_TYPE_I32, reg,
+ its->mem_base->reg, its->mem_offset + ofs);
+ ots->mem_coherent = 0;
+
+ if (IS_DEAD_ARG(1)) {
+ temp_dead(s, its);
+ }
+}
+
+static void tcg_reg_alloc_concat4_i32_i128(TCGContext *s, const TCGOp *op)
+{
+ const TCGLifeData arg_life = op->life;
+ TCGTemp *ots = arg_temp(op->args[0]);
+ int be = HOST_BIG_ENDIAN ? 0xc : 0;
+
+ assert(TCG_TARGET_REG_BITS == 32);
+ tcg_debug_assert(ots->type == TCG_TYPE_I128);
+ tcg_debug_assert(!temp_readonly(ots));
+ tcg_debug_assert(NEED_SYNC_ARG(0));
+
+ if (!ots->mem_allocated) {
+ temp_allocate_frame(s, ots);
+ }
+
+ for (int i = 0; i < 4; ++i) {
+ TCGTemp *its = arg_temp(op->args[i + 1]);
+ int ofs = ots->mem_offset + ((i * 4) ^ be);
+
+ if (its->val_type == TEMP_VAL_CONST &&
+ IS_DEAD_ARG(i + 1) &&
+ tcg_out_sti(s, TCG_TYPE_I32, its->val, ots->mem_base->reg, ofs)) {
+ continue;
+ }
+
+ temp_load(s, its, tcg_target_available_regs[TCG_TYPE_I32],
+ s->reserved_regs, 0);
+ tcg_out_st(s, TCG_TYPE_I32, its->reg, ots->mem_base->reg, ofs);
+
+ if (IS_DEAD_ARG(i + 1)) {
+ temp_dead(s, its);
+ }
+ }
+
+ ots->val_type = TEMP_VAL_MEM;
+ ots->mem_coherent = 1;
+}
+
/*
* Specialized code generation for INDEX_op_dup_vec.
*/
@@ -5009,6 +5088,12 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
case INDEX_op_mov_vec:
tcg_reg_alloc_mov(s, op);
break;
+ case INDEX_op_extr_i128_i32:
+ tcg_reg_alloc_extr_i128_i32(s, op);
+ break;
+ case INDEX_op_concat4_i32_i128:
+ tcg_reg_alloc_concat4_i32_i128(s, op);
+ break;
case INDEX_op_dup_vec:
tcg_reg_alloc_dup(s, op);
break;
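Because an I128 value is always TEMP_VAL_MEM on a 32-bit host, the
concat4_i32_i128 allocation above reduces to four 32-bit stores into the
16-byte frame slot, endian-adjusted with the same offset XOR as the
extract path. A plain-C rendering of that store pattern (illustration
only; the function name is made up):

#include <stdint.h>
#include <string.h>

/* Store the four 32-bit words w[0..3] (lo.lo, lo.hi, hi.lo, hi.hi)
 * into a 16-byte i128 slot; be is 0xc on a big-endian host, else 0. */
static void example_concat4_i32_i128(void *slot16, const uint32_t w[4], int be)
{
    for (int i = 0; i < 4; i++) {
        memcpy((char *)slot16 + ((i * 4) ^ be), &w[i], sizeof(w[i]));
    }
}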
Add code generation functions for data movement between TCGv_i128
and TCGv_i64.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-op.h  |  3 ++
 include/tcg/tcg-opc.h |  4 ++
 tcg/tcg-internal.h    | 12 ++++++
 tcg/tcg-op.c          | 35 ++++++++++++++++++
 tcg/tcg.c             | 85 +++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 139 insertions(+)
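For reference, a usage sketch of the two new entry points as a target
frontend might call them (illustration only; it assumes the TCGv_i128
temp allocation, tcg_temp_new_i128()/tcg_temp_free_i128(), added earlier
in this series):

/* Round-trip a pair of 64-bit halves through a TCGv_i128. */
static void gen_i128_roundtrip(TCGv_i64 lo, TCGv_i64 hi)
{
    TCGv_i128 t128 = tcg_temp_new_i128();

    tcg_gen_concat_i64_i128(t128, lo, hi);  /* lo becomes bits 0..63 */
    tcg_gen_extr_i128_i64(lo, hi, t128);    /* recover the same halves */

    tcg_temp_free_i128(t128);
}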