diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -108,6 +108,7 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
static void tcg_out_movi(TCGContext *s, TCGType type,
TCGReg ret, tcg_target_long arg);
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
+static void tcg_out_goto_tb(TCGContext *s, int which);
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS]);
@@ -4325,6 +4326,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, target_ulong pc_start)
case INDEX_op_exit_tb:
tcg_out_exit_tb(s, op->args[0]);
break;
+ case INDEX_op_goto_tb:
+ tcg_out_goto_tb(s, op->args[0]);
+ break;
case INDEX_op_dup2_vec:
if (tcg_reg_alloc_dup2(s, op)) {
break;
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1897,6 +1897,26 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
+ * write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * actual branch destination will be patched by
+ * tb_target_set_jmp_target later
+ */
+ tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
+ tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1916,25 +1936,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I])
switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that ADRP+ADD are 8-byte aligned so that an atomic
- * write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_insn(s, 3406, ADRP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_TMP, TCG_REG_TMP, 0);
- tcg_out_insn(s, 3207, BR, TCG_REG_TMP);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_insn(s, 3207, BR, a0);
break;
@@ -2305,6 +2306,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1933,6 +1933,31 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_epilogue(s);
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Indirect jump method */
+ intptr_t ptr, dif, dil;
+ TCGReg base = TCG_REG_PC;
+
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ ptr = get_jmp_target_addr(s, which);
+ dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
+ dil = sextract32(dif, 0, 12);
+ if (dif != dil) {
+ /*
+ * The TB is close, but outside the 12 bits addressable by
+ * the load. We can extend this to 20 bits with a sub of a
+ * shifted immediate from pc. In the vastly unlikely event
+ * the code requires more than 1MB, we'll use 2 insns and
+ * be no worse off.
+ */
+ base = TCG_REG_R0;
+ tcg_out_movi32(s, COND_AL, base, ptr - dil);
+ }
+ tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1941,29 +1966,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c;
switch (opc) {
- case INDEX_op_goto_tb:
- {
- /* Indirect jump method */
- intptr_t ptr, dif, dil;
- TCGReg base = TCG_REG_PC;
-
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- ptr = get_jmp_target_addr(s, args[0]);
- dif = tcg_pcrel_diff(s, (void *)ptr) - 8;
- dil = sextract32(dif, 0, 12);
- if (dif != dil) {
- /* The TB is close, but outside the 12 bits addressable by
- the load. We can extend this to 20 bits with a sub of a
- shifted immediate from pc. In the vastly unlikely event
- the code requires more than 1MB, we'll use 2 insns and
- be no worse off. */
- base = TCG_REG_R0;
- tcg_out_movi32(s, COND_AL, base, ptr - dil);
- }
- tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, base, dil);
- set_jmp_reset_offset(s, args[0]);
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_b_reg(s, COND_AL, args[0]);
break;
@@ -2253,6 +2255,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -2357,6 +2357,22 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Jump displacement must be aligned for atomic patching;
+ * see if we need to add extra nops before jump
+ */
+ int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
+ if (gap != 1) {
+ tcg_out_nopn(s, gap - 1);
+ }
+ tcg_out8(s, OPC_JMP_long); /* jmp im */
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2381,22 +2397,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
const_a2 = const_args[2];
switch (opc) {
- case INDEX_op_goto_tb:
- {
- /*
- * Jump displacement must be aligned for atomic patching;
- * see if we need to add extra nops before jump
- */
- int gap = QEMU_ALIGN_PTR_UP(s->code_ptr + 1, 4) - s->code_ptr;
- if (gap != 1) {
- tcg_out_nopn(s, gap - 1);
- }
- tcg_out8(s, OPC_JMP_long); /* jmp im */
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, 0);
- }
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, a0);
@@ -2791,6 +2791,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -1078,6 +1078,25 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /*
+ * Ensure that patch area is 8-byte aligned so that an
+ * atomic write can be used to patch the target address.
+ */
+ if ((uintptr_t)s->code_ptr & 7) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ /*
+ * actual branch destination will be patched by
+ * tb_target_set_jmp_target later
+ */
+ tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1088,24 +1107,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];
switch (opc) {
- case INDEX_op_goto_tb:
- /*
- * Ensure that patch area is 8-byte aligned so that an
- * atomic write can be used to patch the target address.
- */
- if ((uintptr_t)s->code_ptr & 7) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- /*
- * actual branch destination will be patched by
- * tb_target_set_jmp_target later
- */
- tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, 0);
- tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_mb:
tcg_out_mb(s, a0);
break;
@@ -1500,6 +1501,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -1965,6 +1965,17 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_opc_imm(s, OPC_ORI, TCG_REG_V0, b0, a0 & 0xffff);
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* indirect jump method */
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
+ tcg_out_nop(s);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1984,15 +1995,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];
switch (opc) {
- case INDEX_op_goto_tb:
- /* indirect jump method */
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
- tcg_out_nop(s);
- set_jmp_reset_offset(s, a0);
- break;
case INDEX_op_goto_ptr:
/* jmp to the given host address (could be epilogue) */
tcg_out_opc_reg(s, OPC_JR, 0, a0, 0);
@@ -2401,6 +2403,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -2622,6 +2622,32 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_b(s, 0, tcg_code_gen_epilogue);
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (TCG_TARGET_REG_BITS == 64) {
+ /* Ensure the next insns are 8 or 16-byte aligned. */
+ while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
+ tcg_out32(s, NOP);
+ }
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
+ tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
+ tcg_out32(s, BCCTR | BO_ALWAYS);
+ set_jmp_reset_offset(s, which);
+ if (USE_REG_TB) {
+ /* For the unlinked case, need to reset TCG_REG_TB. */
+ tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
+ -tcg_current_code_size(s));
+ }
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, B);
+ set_jmp_reset_offset(s, which);
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -2629,31 +2655,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;
switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (TCG_TARGET_REG_BITS == 64) {
- /* Ensure the next insns are 8 or 16-byte aligned. */
- while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
- tcg_out32(s, NOP);
- }
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
- } else {
- set_jmp_insn_offset(s, args[0]);
- tcg_out32(s, B);
- set_jmp_reset_offset(s, args[0]);
- break;
- }
- tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
- tcg_out32(s, BCCTR | BO_ALWAYS);
- set_jmp_reset_offset(s, args[0]);
- if (USE_REG_TB) {
- /* For the unlinked case, need to reset TCG_REG_TB. */
- tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
- -tcg_current_code_size(s));
- }
- break;
case INDEX_op_goto_ptr:
tcg_out32(s, MTSPR | RS(args[0]) | CTR);
if (USE_REG_TB) {
@@ -3181,6 +3182,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -1275,6 +1275,16 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ get_jmp_target_addr(s, which));
+ tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1285,15 +1295,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
int c2 = const_args[2];
switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method */
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
- get_jmp_target_addr(s, a0));
- tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
- set_jmp_reset_offset(s, a0);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
break;
@@ -1594,6 +1595,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
g_assert_not_reached();
}
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -2081,6 +2081,41 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
}
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ if (TCG_TARGET_HAS_direct_jump) {
+ /*
+ * Branch displacement must be aligned for atomic patching;
+ * see if we need to add extra nop before branch
+ */
+ if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
+ tcg_out16(s, NOP);
+ }
+ tcg_debug_assert(!USE_REG_TB);
+ tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
+ set_jmp_insn_offset(s, which);
+ s->code_ptr += 2;
+ } else {
+ /* Load address stored in the TB. */
+ tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
+ (const void *)get_jmp_target_addr(s, which));
+ /* and go there */
+ tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
+ }
+ set_jmp_reset_offset(s, which);
+
+ /*
+ * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
+ * to the beginning of this TB.
+ */
+ if (USE_REG_TB) {
+ int ofs = -tcg_current_code_size(s);
+ /* All TB are restricted to 64KiB by unwind info. */
+ tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
+ tcg_out_insn(s, RXY, LAY, TCG_REG_TB, TCG_REG_TB, TCG_REG_NONE, ofs);
+ }
+}
+
# define OP_32_64(x) \
case glue(glue(INDEX_op_,x),_i32): \
case glue(glue(INDEX_op_,x),_i64)
@@ -2093,40 +2128,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGArg a0, a1, a2;
switch (opc) {
- case INDEX_op_goto_tb:
- a0 = args[0];
- if (TCG_TARGET_HAS_direct_jump) {
- /*
- * branch displacement must be aligned for atomic patching;
- * see if we need to add extra nop before branch
- */
- if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
- tcg_out16(s, NOP);
- }
- tcg_debug_assert(!USE_REG_TB);
- tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
- set_jmp_insn_offset(s, a0);
- s->code_ptr += 2;
- } else {
- /* load address stored at s->tb_jmp_target_addr + a0 */
- tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
- (const void *)get_jmp_target_addr(s, a0));
- /* and go there */
- tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
- }
- set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- int ofs = -tcg_current_code_size(s);
- /* All TB are restricted to 64KiB by unwind info. */
- tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
- tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
- TCG_REG_TB, TCG_REG_NONE, ofs);
- }
- break;
-
case INDEX_op_goto_ptr:
a0 = args[0];
if (USE_REG_TB) {
@@ -2662,6 +2663,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -1436,6 +1436,41 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ /* Direct jump. */
+ if (USE_REG_TB) {
+ /* make sure the patch is 8-byte aligned. */
+ if ((intptr_t)s->code_ptr & 4) {
+ tcg_out_nop(s);
+ }
+ set_jmp_insn_offset(s, which);
+ tcg_out_sethi(s, TCG_REG_T1, 0);
+ tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
+ tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ } else {
+ set_jmp_insn_offset(s, which);
+ tcg_out32(s, CALL);
+ tcg_out_nop(s);
+ }
+ set_jmp_reset_offset(s, which);
+
+ /*
+ * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
+ * to the beginning of this TB.
+ */
+ if (USE_REG_TB) {
+ int c = -tcg_current_code_size(s);
+ if (check_fit_i32(c, 13)) {
+ tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
+ tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
+ }
+ }
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -1450,38 +1485,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
c2 = const_args[2];
switch (opc) {
- case INDEX_op_goto_tb:
- /* Direct jump. */
- if (USE_REG_TB) {
- /* make sure the patch is 8-byte aligned. */
- if ((intptr_t)s->code_ptr & 4) {
- tcg_out_nop(s);
- }
- set_jmp_insn_offset(s, a0);
- tcg_out_sethi(s, TCG_REG_T1, 0);
- tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
- tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
- } else {
- set_jmp_insn_offset(s, a0);
- tcg_out32(s, CALL);
- tcg_out_nop(s);
- }
- set_jmp_reset_offset(s, a0);
-
- /* For the unlinked path of goto_tb, we need to reset
- TCG_REG_TB to the beginning of this TB. */
- if (USE_REG_TB) {
- c = -tcg_current_code_size(s);
- if (check_fit_i32(c, 13)) {
- tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
- } else {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
- tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
- TCG_REG_T1, ARITH_ADD);
- }
- }
- break;
case INDEX_op_goto_ptr:
tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
if (USE_REG_TB) {
@@ -1700,6 +1703,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -598,6 +598,14 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}
+static void tcg_out_goto_tb(TCGContext *s, int which)
+{
+ qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
+ /* indirect jump method. */
+ tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
+ set_jmp_reset_offset(s, which);
+}
+
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
const TCGArg args[TCG_MAX_OP_ARGS],
const int const_args[TCG_MAX_OP_ARGS])
@@ -605,13 +613,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
TCGOpcode exts;
switch (opc) {
- case INDEX_op_goto_tb:
- qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
- /* indirect jump method. */
- tcg_out_op_p(s, opc, (void *)get_jmp_target_addr(s, args[0]));
- set_jmp_reset_offset(s, args[0]);
- break;
-
case INDEX_op_goto_ptr:
tcg_out_op_r(s, opc, args[0]);
break;
@@ -784,6 +785,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_mov_i64:
case INDEX_op_call: /* Always emitted via tcg_out_call. */
case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
+ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
default:
tcg_abort();
}
The INDEX_op_goto_tb opcode needs no register allocation.
Split out a dedicated helper function for it.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c                        |  4 ++
 tcg/aarch64/tcg-target.c.inc     | 40 +++++++++---------
 tcg/arm/tcg-target.c.inc         | 49 +++++++++++-----------
 tcg/i386/tcg-target.c.inc        | 33 +++++++--------
 tcg/loongarch64/tcg-target.c.inc | 38 +++++++++--------
 tcg/mips/tcg-target.c.inc        | 21 ++++++----
 tcg/ppc/tcg-target.c.inc         | 52 ++++++++++++------------
 tcg/riscv/tcg-target.c.inc       | 20 +++++----
 tcg/s390x/tcg-target.c.inc       | 70 ++++++++++++++++----------------
 tcg/sparc64/tcg-target.c.inc     | 68 ++++++++++++++++---------------
 tcg/tci/tcg-target.c.inc         | 16 ++++----
 11 files changed, 219 insertions(+), 192 deletions(-)
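As a review aid, not part of the patch: the nop-padding arithmetic used by the
i386 tcg_out_goto_tb above can be sanity-checked on the host with a small
standalone sketch. align_up() is a stand-in for QEMU_ALIGN_PTR_UP and the
addresses are made up; only the gap/nops computation mirrors the patch.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for QEMU_ALIGN_PTR_UP: round p up to a multiple of n (power of 2). */
  static uintptr_t align_up(uintptr_t p, uintptr_t n)
  {
      return (p + n - 1) & ~(n - 1);
  }

  int main(void)
  {
      /*
       * OPC_JMP_long is one opcode byte followed by a 4-byte displacement.
       * The displacement starts at code_ptr + 1 and must be 4-byte aligned
       * so that the jump target can later be rewritten atomically.
       */
      for (uintptr_t code_ptr = 0x1000; code_ptr < 0x1008; code_ptr++) {
          uintptr_t gap = align_up(code_ptr + 1, 4) - code_ptr;
          uintptr_t nops = (gap != 1) ? gap - 1 : 0;  /* nops emitted before the jmp */
          uintptr_t disp_addr = code_ptr + nops + 1;  /* where the 4-byte field lands */

          assert(disp_addr % 4 == 0);
          printf("code_ptr=%#lx  nops=%lu  displacement at %#lx\n",
                 (unsigned long)code_ptr, (unsigned long)nops,
                 (unsigned long)disp_addr);
      }
      return 0;
  }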