@@ -690,8 +690,8 @@ tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

-static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
- TCGReg rn, int imm8)
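+/*
+ * All in-file users go away with tcg_out_arg_reg64() below, so mark
+ * this helper unused (keeping -Wunused-function quiet) rather than
+ * deleting it.
+ */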
+static void __attribute__((unused))
+tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, int imm8)
{
tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}
@@ -969,28 +969,16 @@ static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

-static void __attribute__((unused))
-tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
-{
- tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
-}
-
static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
/* sxth */
tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

-static void tcg_out_ext16u_cond(TCGContext *s, ARMCond cond,
- TCGReg rd, TCGReg rn)
-{
- /* uxth */
- tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
-}
-
static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
- tcg_out_ext16u_cond(s, COND_AL, rd, rn);
+ /* uxth */
+ tcg_out32(s, 0x06ff0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
@@ -1382,92 +1370,29 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
#endif
};

-/* Helper routines for marshalling helper function arguments into
- * the correct registers and stack.
- * argreg is where we want to put this argument, arg is the argument itself.
- * Return value is the updated argreg ready for the next call.
- * Note that argreg 0..3 is real registers, 4+ on stack.
- *
- * We provide routines for arguments which are: immediate, 32 bit
- * value in register, 16 and 8 bit values in register (which must be zero
- * extended before use) and 64 bit value in a lo:hi register pair.
- */
-#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \
-static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \
-{ \
- if (argreg < 4) { \
- MOV_ARG(s, COND_AL, argreg, arg); \
- } else { \
- int ofs = (argreg - 4) * 4; \
- EXT_ARG; \
- tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \
- tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \
- } \
- return argreg + 1; \
-}
-
-DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
- (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
-DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
- (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
-DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u_cond,
- (tcg_out_ext16u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
-DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
-
-static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
- TCGReg arglo, TCGReg arghi)
+static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
- /* 64 bit arguments must go in even/odd register pairs
- * and in 8-aligned stack slots.
- */
- if (argreg & 1) {
- argreg++;
- }
- if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
- tcg_out_strd_8(s, COND_AL, arglo,
- TCG_REG_CALL_STACK, (argreg - 4) * 4);
- return argreg + 2;
- } else {
- argreg = tcg_out_arg_reg32(s, argreg, arglo);
- argreg = tcg_out_arg_reg32(s, argreg, arghi);
- return argreg;
- }
+ /* We arrive at the slow path via "BLNE", so R14 contains l->raddr. */
+ return TCG_REG_R14;
}

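+/*
+ * Parameters for the common tcg_out_{ld,st}_helper_args() code in tcg.c:
+ * ra_gen returns a register that already holds the return address, and
+ * tmp[0..ntmp-1] lists the scratch register(s) that code may clobber.
+ */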
+static const TCGLdstHelperParam ldst_helper_param = {
+ .ra_gen = ldst_ra_gen,
+ .ntmp = 1,
+ .tmp = { TCG_REG_TMP },
+};
+
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
- TCGReg argreg;
- MemOpIdx oi = lb->oi;
- MemOp opc = get_memop(oi);
+ MemOp opc = get_memop(lb->oi);

if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

- argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
- if (TARGET_LONG_BITS == 64) {
- argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
- } else {
- argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
- }
- argreg = tcg_out_arg_imm32(s, argreg, oi);
- argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
-
- /* Use the canonical unsigned helpers and minimize icache usage. */
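+ /* Marshal env, the guest address, oi and the return address into
+  * the EABI argument locations, as the removed open-coded sequence
+  * above did by hand.
+  */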
+ tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
-
- if ((opc & MO_SIZE) == MO_64) {
- TCGMovExtend ext[2] = {
- { .dst = lb->datalo_reg, .dst_type = TCG_TYPE_I32,
- .src = TCG_REG_R0, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
- { .dst = lb->datahi_reg, .dst_type = TCG_TYPE_I32,
- .src = TCG_REG_R1, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
- };
- tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP);
- } else {
- tcg_out_movext(s, TCG_TYPE_I32, lb->datalo_reg,
- TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0);
- }
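+ /* Copy the result from R0 (R0:R1 for MO_64) into datalo/datahi,
+  * with the extension implied by opc; this replaces the explicit
+  * tcg_out_movext calls above.
+  */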
+ tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);

tcg_out_goto(s, COND_AL, lb->raddr);
return true;
@@ -1475,42 +1400,13 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
- TCGReg argreg, datalo, datahi;
- MemOpIdx oi = lb->oi;
- MemOp opc = get_memop(oi);
+ MemOp opc = get_memop(lb->oi);

if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
return false;
}

- argreg = TCG_REG_R0;
- argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
- if (TARGET_LONG_BITS == 64) {
- argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
- } else {
- argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
- }
-
- datalo = lb->datalo_reg;
- datahi = lb->datahi_reg;
- switch (opc & MO_SIZE) {
- case MO_8:
- argreg = tcg_out_arg_reg8(s, argreg, datalo);
- break;
- case MO_16:
- argreg = tcg_out_arg_reg16(s, argreg, datalo);
- break;
- case MO_32:
- default:
- argreg = tcg_out_arg_reg32(s, argreg, datalo);
- break;
- case MO_64:
- argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
- break;
- }
-
- argreg = tcg_out_arg_imm32(s, argreg, oi);
- argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
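+ /* Marshal env, the guest address, the data value(s), oi and the
+  * return address, replacing the per-size switch open-coded above.
+  */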
+ tcg_out_st_helper_args(s, lb, &ldst_helper_param);

/* Tail-call to the helper, which will return to the fast path. */
tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);