@@ -206,6 +206,7 @@ static void * const qemu_ld_helpers[MO_SSIZE + 1] __attribute__((unused)) = {
[MO_UQ] = helper_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_ldsl_mmu,
+ [MO_128] = helper_ld16_mmu,
#endif
};
@@ -214,6 +215,9 @@ static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
[MO_16] = helper_stw_mmu,
[MO_32] = helper_stl_mmu,
[MO_64] = helper_stq_mmu,
+#if TCG_TARGET_REG_BITS == 64
+ [MO_128] = helper_st16_mmu,
+#endif
};
TCGContext tcg_init_ctx;
@@ -866,6 +870,15 @@ static TCGHelperInfo info_helper_ld64_mmu = {
| dh_typemask(ptr, 4) /* uintptr_t ra */
};
+static TCGHelperInfo info_helper_ld128_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(i128, 0) /* return Int128 */
+ | dh_typemask(env, 1)
+ | dh_typemask(tl, 2) /* target_ulong addr */
+ | dh_typemask(i32, 3) /* unsigned oi */
+ | dh_typemask(ptr, 4) /* uintptr_t ra */
+};
+
static TCGHelperInfo info_helper_st32_mmu = {
.flags = TCG_CALL_NO_WG,
.typemask = dh_typemask(void, 0)
@@ -886,6 +899,16 @@ static TCGHelperInfo info_helper_st64_mmu = {
| dh_typemask(ptr, 5) /* uintptr_t ra */
};
+static TCGHelperInfo info_helper_st128_mmu = {
+ .flags = TCG_CALL_NO_WG,
+ .typemask = dh_typemask(void, 0)
+ | dh_typemask(env, 1)
+ | dh_typemask(tl, 2) /* target_ulong addr */
+ | dh_typemask(i128, 3) /* Int128 data */
+ | dh_typemask(i32, 4) /* unsigned oi */
+ | dh_typemask(ptr, 5) /* uintptr_t ra */
+};
+
#ifdef CONFIG_TCG_INTERPRETER
static ffi_type *typecode_to_ffi(int argmask)
{
@@ -1299,8 +1322,10 @@ static void tcg_context_init(unsigned max_cpus)
init_call_layout(&info_helper_ld32_mmu);
init_call_layout(&info_helper_ld64_mmu);
+ init_call_layout(&info_helper_ld128_mmu);
init_call_layout(&info_helper_st32_mmu);
init_call_layout(&info_helper_st64_mmu);
+ init_call_layout(&info_helper_st128_mmu);
#ifdef CONFIG_TCG_INTERPRETER
init_ffi_layouts();
@@ -5395,6 +5420,8 @@ static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
TCGType dst_type, TCGType src_type,
TCGReg lo, TCGReg hi)
{
+ MemOp reg_mo;
+
if (dst_type <= TCG_TYPE_REG) {
MemOp src_ext;
@@ -5422,19 +5449,25 @@ static unsigned tcg_out_helper_add_mov(TCGMovExtend *mov,
return 1;
}
- assert(TCG_TARGET_REG_BITS == 32);
+ if (TCG_TARGET_REG_BITS == 32) {
+ assert(dst_type == TCG_TYPE_I64);
+ reg_mo = MO_32;
+ } else {
+ assert(dst_type == TCG_TYPE_I128);
+ reg_mo = MO_64;
+ }
mov[0].dst = loc[HOST_BIG_ENDIAN].arg_slot;
mov[0].src = lo;
- mov[0].dst_type = TCG_TYPE_I32;
- mov[0].src_type = TCG_TYPE_I32;
- mov[0].src_ext = MO_32;
+ mov[0].dst_type = TCG_TYPE_REG;
+ mov[0].src_type = TCG_TYPE_REG;
+ mov[0].src_ext = reg_mo;
mov[1].dst = loc[!HOST_BIG_ENDIAN].arg_slot;
mov[1].src = hi;
- mov[1].dst_type = TCG_TYPE_I32;
- mov[1].src_type = TCG_TYPE_I32;
- mov[1].src_ext = MO_32;
+ mov[1].dst_type = TCG_TYPE_REG;
+ mov[1].src_type = TCG_TYPE_REG;
+ mov[1].src_ext = reg_mo;
return 2;
}
@@ -5457,6 +5490,9 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
case MO_64:
info = &info_helper_ld64_mmu;
break;
+ case MO_128:
+ info = &info_helper_ld128_mmu;
+ break;
default:
g_assert_not_reached();
}
@@ -5471,8 +5507,33 @@ static void tcg_out_ld_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
tcg_out_helper_load_slots(s, nmov, mov, parm);
- /* No special attention for 32 and 64-bit return values. */
- tcg_debug_assert(info->out_kind == TCG_CALL_RET_NORMAL);
+ switch (info->out_kind) {
+ case TCG_CALL_RET_NORMAL:
+ case TCG_CALL_RET_BY_VEC:
+ break;
+ case TCG_CALL_RET_BY_REF:
+ /*
+ * The return reference is in the first argument slot.
+ * We need memory in which to return: re-use the top of stack.
+ */
+ {
+ int ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
+
+ if (arg_slot_reg_p(0)) {
+ tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ } else {
+ tcg_debug_assert(parm->ntmp != 0);
+ tcg_out_addi_ptr(s, parm->tmp[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
+ TCG_REG_CALL_STACK, ofs_slot0);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}
@@ -5481,11 +5542,18 @@ static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
bool load_sign,
const TCGLdstHelperParam *parm)
{
+ MemOp mop = get_memop(ldst->oi);
TCGMovExtend mov[2];
+ int ofs_slot0;
- if (ldst->type <= TCG_TYPE_REG) {
- MemOp mop = get_memop(ldst->oi);
+ switch (ldst->type) {
+ case TCG_TYPE_I64:
+ if (TCG_TARGET_REG_BITS == 32) {
+ break;
+ }
+ /* fall through */
+ case TCG_TYPE_I32:
mov[0].dst = ldst->datalo_reg;
mov[0].src = tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, 0);
mov[0].dst_type = ldst->type;
@@ -5511,25 +5579,49 @@ static void tcg_out_ld_helper_ret(TCGContext *s, const TCGLabelQemuLdst *ldst,
mov[0].src_ext = mop & MO_SSIZE;
}
tcg_out_movext1(s, mov);
- } else {
- assert(TCG_TARGET_REG_BITS == 32);
+ return;
- mov[0].dst = ldst->datalo_reg;
- mov[0].src =
- tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
- mov[0].dst_type = TCG_TYPE_I32;
- mov[0].src_type = TCG_TYPE_I32;
- mov[0].src_ext = MO_32;
+ case TCG_TYPE_I128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ ofs_slot0 = TCG_TARGET_CALL_STACK_OFFSET;
+ switch (TCG_TARGET_CALL_RET_I128) {
+ case TCG_CALL_RET_NORMAL:
+ break;
+ case TCG_CALL_RET_BY_VEC:
+ tcg_out_st(s, TCG_TYPE_V128,
+ tcg_target_call_oarg_reg(TCG_CALL_RET_BY_VEC, 0),
+ TCG_REG_CALL_STACK, ofs_slot0);
+ /* fall through */
+ case TCG_CALL_RET_BY_REF:
+ tcg_out_ld(s, TCG_TYPE_I64, ldst->datalo_reg,
+ TCG_REG_CALL_STACK, ofs_slot0 + 8 * HOST_BIG_ENDIAN);
+ tcg_out_ld(s, TCG_TYPE_I64, ldst->datahi_reg,
+ TCG_REG_CALL_STACK, ofs_slot0 + 8 * !HOST_BIG_ENDIAN);
+ return;
+ default:
+ g_assert_not_reached();
+ }
+ break;
- mov[1].dst = ldst->datahi_reg;
- mov[1].src =
- tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
- mov[1].dst_type = TCG_TYPE_REG;
- mov[1].src_type = TCG_TYPE_REG;
- mov[1].src_ext = MO_32;
-
- tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
+ default:
+ g_assert_not_reached();
}
+
+ mov[0].dst = ldst->datalo_reg;
+ mov[0].src =
+ tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, HOST_BIG_ENDIAN);
+ mov[0].dst_type = TCG_TYPE_REG;
+ mov[0].src_type = TCG_TYPE_REG;
+ mov[0].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+
+ mov[1].dst = ldst->datahi_reg;
+ mov[1].src =
+ tcg_target_call_oarg_reg(TCG_CALL_RET_NORMAL, !HOST_BIG_ENDIAN);
+ mov[1].dst_type = TCG_TYPE_REG;
+ mov[1].src_type = TCG_TYPE_REG;
+ mov[1].src_ext = TCG_TARGET_REG_BITS == 32 ? MO_32 : MO_64;
+
+ tcg_out_movext2(s, mov, mov + 1, parm->ntmp ? parm->tmp[0] : -1);
}
static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
@@ -5553,6 +5645,10 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
info = &info_helper_st64_mmu;
data_type = TCG_TYPE_I64;
break;
+ case MO_128:
+ info = &info_helper_st128_mmu;
+ data_type = TCG_TYPE_I128;
+ break;
default:
g_assert_not_reached();
}
@@ -5570,13 +5666,47 @@ static void tcg_out_st_helper_args(TCGContext *s, const TCGLabelQemuLdst *ldst,
/* Handle data argument. */
loc = &info->in[next_arg];
- n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
- ldst->datalo_reg, ldst->datahi_reg);
- next_arg += n;
- nmov += n;
- tcg_debug_assert(nmov <= ARRAY_SIZE(mov));
+ switch (loc->kind) {
+ case TCG_CALL_ARG_NORMAL:
+ case TCG_CALL_ARG_EXTEND_U:
+ case TCG_CALL_ARG_EXTEND_S:
+ n = tcg_out_helper_add_mov(mov + nmov, loc, data_type, ldst->type,
+ ldst->datalo_reg, ldst->datahi_reg);
+ next_arg += n;
+ nmov += n;
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+ break;
+
+ case TCG_CALL_ARG_BY_REF:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ tcg_debug_assert(data_type == TCG_TYPE_I128);
+ tcg_out_st(s, TCG_TYPE_I64,
+ HOST_BIG_ENDIAN ? ldst->datahi_reg : ldst->datalo_reg,
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[0].ref_slot));
+ tcg_out_st(s, TCG_TYPE_I64,
+ HOST_BIG_ENDIAN ? ldst->datalo_reg : ldst->datahi_reg,
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc[1].ref_slot));
+
+ tcg_out_helper_load_slots(s, nmov, mov, parm);
+
+ if (arg_slot_reg_p(loc->arg_slot)) {
+ tcg_out_addi_ptr(s, tcg_target_call_iarg_regs[loc->arg_slot],
+ TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(loc->ref_slot));
+ } else {
+ tcg_debug_assert(parm->ntmp != 0);
+ tcg_out_addi_ptr(s, parm->tmp[0], TCG_REG_CALL_STACK,
+ arg_slot_stk_ofs(loc->ref_slot));
+ tcg_out_st(s, TCG_TYPE_PTR, parm->tmp[0],
+ TCG_REG_CALL_STACK, arg_slot_stk_ofs(loc->arg_slot));
+ }
+ next_arg += 2;
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
- tcg_out_helper_load_slots(s, nmov, mov, parm);
tcg_out_helper_load_common_args(s, ldst, parm, info, next_arg);
}