@@ -105,6 +105,7 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
+static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS]);
@@ -4741,6 +4742,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
case INDEX_op_exit_tb:
tcg_out_exit_tb(s, op->args[0]);
break;
+ case INDEX_op_goto_tb:
+ tcg_out_goto_tb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
@@ -1898,6 +1898,27 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
+ * write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * The actual branch destination will be patched later
+ * by tb_target_set_jmp_target.
+ */
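+ /* adrp+add together can reach any target within ±4GiB of this code. */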
+ tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1917,25 +1937,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])

switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
- * write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_insn(s, 3207, BR, a0);
break;
@@ -2306,6 +2307,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
@@ -1939,6 +1939,32 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_epilogue(s);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Indirect jump method */
+ intptr_t ptr, dif, dil;
+ TCGReg base = TCG_REG_PC;
+
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ ptr = get_jmp_target_addr(s, which);
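+ /* In ARM mode the PC reads as the insn address + 8; hence the -8 below. */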
+ dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
+ dil = sextract32(dif, 0, 12);
+ if (dif != dil) {
+ /*
+ * The TB is close, but outside the 12 bits addressable by
+ * the load. We can extend this to 20 bits with a sub of a
+ * shifted immediate from pc. In the vastly unlikely event
+ * the code requires more than 1MB, we'll use 2 insns and
+ * be no worse off.
+ */
+ base = TCG_REG_R0;
+ tcg_out_movi32(s, COND_AL, base, ptr - dil);
+ }
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1947,29 +1972,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c;

switch (opc) {
- case INDEX_op_goto_tb:
- {
- /* Indirect jump method */
- intptr_t ptr, dif, dil;
- TCGReg base = TCG_REG_PC;
-
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- ptr = get_jmp_target_addr(s, args[0]);
- dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
- dil = sextract32(dif, 0, 12);
- if (dif != dil) {
- /* The TB is close, but outside the 12 bits addressable by
- the load. We can extend this to 20 bits with a sub of a
- shifted immediate from pc. In the vastly unlikely event
- the code requires more than 1MB, we'll use 2 insns and
- be no worse off. */
- base = TCG_REG_R0;
- tcg_out_movi32(s, COND_AL, base, ptr - dil);
- }
- tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- set_jmp_reset_offset(s, args[0]);
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_b_reg(s, COND_AL, args[0]);
break;
@@ -2259,6 +2261,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -2358,6 +2358,23 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * The jump displacement must be aligned for atomic patching;
+ * see if we need to add extra nops before the jump.
+ */
+ int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
+ if (gap != 1) {
+ tcg_out_nopn(s, gap - 1);
+ }
+ tcg_out8(s, OPC_JMP_long); /* jmp im */
+ set_jmp_insn_offset(s, which);
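+ /* 32-bit displacement placeholder; patched by tb_target_set_jmp_target. */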
+ tcg_out32(s, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2382,22 +2398,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const_a2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- {
- /*
- * Jump displacement must be aligned for atomic patching;
- * see if we need to add extra nops before jump
- */
- int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
- if (gap != 1) {
- tcg_out_nopn(s, gap - 1);
- }
- tcg_out8(s, OPC_JMP_long); /* jmp im */
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, 0);
- }
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
@@ -2792,6 +2792,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -1079,6 +1079,26 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that the patch area is 8-byte aligned so that an
+ * atomic write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * The actual branch destination will be patched later
+ * by tb_target_set_jmp_target.
+ */
+ tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
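+ /* jirl with rd=$zero discards the link address: a plain indirect branch. */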
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1089,24 +1108,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that patch area is 8-byte aligned so that an
- * atomic write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
- tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -1501,6 +1502,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
@@ -1966,6 +1966,18 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* indirect jump method */
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
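+ /* Fill the branch delay slot of the jr. */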
+ tcg_out_nop(s);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1985,15 +1996,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /* indirect jump method */
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
- tcg_out_nop(s);
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
@@ -2402,6 +2404,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -2622,6 +2622,33 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_b(s, 0, tcg_code_gen_epilogue);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* Ensure the next insns are 8- or 16-byte aligned. */
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
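+ /* The addis+addi pair is patched by tb_target_set_jmp_target at link time. */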
+ tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ set_jmp_reset_offset(s, which);
+ if (USE_REG_TB) {
+ /* For the unlinked case, need to reset TCG_REG_TB. */
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
+ -tcg_current_code_size(s));
+ }
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, B);
+ set_jmp_reset_offset(s, which);
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2629,31 +2655,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;

switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (TCG_TARGET_REG_BITS == 64) {
- /* Ensure the next insns are 8 or 16-byte aligned. */
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- } else {
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, B);
- set_jmp_reset_offset(s, args[0]);
- break;
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, args[0]);
- if (USE_REG_TB) {
- /* For the unlinked case, need to reset TCG_REG_TB. */
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
- -tcg_current_code_size(s));
- }
- break;
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
if (USE_REG_TB) {
@@ -3181,6 +3182,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -1300,6 +1300,17 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
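+ /* jalr with rd=zero discards the return address: a plain indirect jump. */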
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1310,15 +1320,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
break;
@@ -1619,6 +1620,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
@@ -1955,6 +1955,22 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * The branch displacement must be aligned for atomic patching;
+ * see if we need to add an extra nop before the branch.
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
+ tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+ set_jmp_insn_offset(s, which);
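+ /* Reserve 2 insn units (4 bytes) for the brcl relative displacement. */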
+ s->code_ptr += 2;
+ set_jmp_reset_offset(s, which);
+}
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
@@ -1967,21 +1982,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;

switch (opc) {
- case INDEX_op_goto_tb:
- a0 = args[0];
- /*
- * branch displacement must be aligned for atomic patching;
- * see if we need to add extra nop before branch
- */
- if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
- tcg_out16(s, NOP);
- }
- tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- set_jmp_insn_offset(s, a0);
- s->code_ptr += 2;
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
a0 = args[0];
tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
@@ -2620,6 +2620,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -1437,6 +1437,42 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (USE_REG_TB) {
+ /* Make sure the insn pair to be patched is 8-byte aligned. */
+ if ((intptr_t)s->code_ptr & 4) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ tcg_out_sethi(s, TCG_REG_T1, 0);
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, CALL);
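+ /* Fill the delay slot of the call. */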
+ tcg_out_nop(s);
+ }
+ set_jmp_reset_offset(s, which);
+
+ /*
+ * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
+ * to the beginning of this TB.
+ */
+ if (USE_REG_TB) {
+ int c = -tcg_current_code_size(s);
+ if (check_fit_i32(c, 13)) {
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ }
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1451,38 +1486,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];

switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (USE_REG_TB) {
- /* make sure the patch is 8-byte aligned. */
- if ((intptr_t)s->code_ptr & 4) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- tcg_out_sethi(s, TCG_REG_T1, 0);
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
- } else {
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, CALL);
- tcg_out_nop(s);
- }
- set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- c = -tcg_current_code_size(s);
- if (check_fit_i32(c, 13)) {
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
- TCG_REG_T1, ARITH_ADD);
- }
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
if (USE_REG_TB) {
@@ -1701,6 +1704,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
@@ -595,6 +595,15 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method. */
+ tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
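+ /* The interpreter reloads the target from this slot on each execution. */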
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -602,13 +610,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGOpcode exts;

switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method. */
- tcg_out_op_p(s, opc, (void *)get_jmp_target_addr(s, args[0]));
- set_jmp_reset_offset(s, args[0]);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_op_r(s, opc, args[0]);
break;
@@ -781,6 +782,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}