@@ -149,7 +149,7 @@ typedef enum {
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
@@ -136,7 +136,7 @@ enum {
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
@@ -210,11 +210,11 @@ extern bool have_movbe;
#define TCG_TARGET_extract_i64_valid(ofs, len) \
(((ofs) == 8 && (len) == 8) || ((ofs) + (len)) == 32)
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
- uintptr_t jmp_addr, uintptr_t addr)
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
/* patch the branch destination */
- qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
/* no need to flush icache explicitly */
}
@@ -202,7 +202,7 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
@@ -176,7 +176,7 @@ extern bool have_vsx;
#define TCG_TARGET_HAS_bitsel_vec have_vsx
#define TCG_TARGET_HAS_cmpsel_vec 0
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
@@ -161,7 +161,7 @@ typedef enum {
#endif
/* not defined -- call should be eliminated at compile time */
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_DEFAULT_MO (0)
@@ -146,12 +146,12 @@ enum {
TCG_AREG0 = TCG_REG_R10,
};
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
- uintptr_t jmp_addr, uintptr_t addr)
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
/* patch the branch destination */
- intptr_t disp = addr - (jmp_addr - 2);
- qatomic_set((int32_t *)jmp_addr, disp / 2);
+ intptr_t disp = addr - (jmp_rx - 2);
+ qatomic_set((int32_t *)jmp_rw, disp / 2);
/* no need to flush icache explicitly */
}
@@ -169,7 +169,7 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_NEED_POOL_LABELS
@@ -199,11 +199,11 @@ void tci_disas(uint8_t opc);
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
-static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
- uintptr_t jmp_addr, uintptr_t addr)
+static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
/* patch the branch destination */
- qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
+ qatomic_set((int32_t *)jmp_rw, addr - (jmp_rx + 4));
/* no need to flush icache explicitly */
}
@@ -371,7 +371,9 @@ void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
if (TCG_TARGET_HAS_direct_jump) {
uintptr_t offset = tb->jmp_target_arg[n];
uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
- tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
+ uintptr_t jmp_rx = tc_ptr + offset;
+ uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
+ tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
} else {
tb->jmp_target_arg[n] = addr;
}
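
The only change outside the backends is here: the stored jmp_target_arg offset is resolved against the executable mapping to form jmp_rx, and jmp_rw is derived from it via tcg_splitwx_diff (zero when the code buffer is not dual-mapped, so the two values then alias the same address). As a minimal stand-alone illustration of the layout this assumes (not QEMU's actual allocator), the sketch below dual-maps a buffer on Linux and patches a 4-byte branch slot the way the inline backends above do; memfd_create(), the chosen offsets and the omitted error handling are all illustrative.

/*
 * Minimal sketch of a split rx/rw code buffer: the same pages mapped once
 * read-execute and once read-write.  Displacements are computed against
 * the rx view that the CPU fetches from; the patch itself is written
 * through the rw view.  Error handling omitted for brevity.
 */
#define _GNU_SOURCE
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t size = 64 * 1024;
    int fd = memfd_create("jit-buf", 0);

    (void)ftruncate(fd, size);

    /* Two views of the same backing pages. */
    uint8_t *rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    uint8_t *rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    /* Analogue of tcg_splitwx_diff: rw == rx - diff, as in the hunk above. */
    ptrdiff_t splitwx_diff = (uintptr_t)rx - (uintptr_t)rw;

    /* Pretend a translated block left a 4-byte branch slot at rx+0x100
     * that must now be redirected to rx+0x2000. */
    uintptr_t jmp_rx = (uintptr_t)rx + 0x100;
    uintptr_t jmp_rw = jmp_rx - splitwx_diff;
    uintptr_t addr   = (uintptr_t)rx + 0x2000;

    int32_t disp = addr - (jmp_rx + 4);           /* relative to the executed address */
    memcpy((void *)jmp_rw, &disp, sizeof(disp));  /* store via the writable alias;
                                                     QEMU uses an atomic store here */

    printf("rx=%p rw=%p diff=%td patched disp=%d\n",
           (void *)jmp_rx, (void *)jmp_rw, splitwx_diff, disp);
    return 0;
}
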
@@ -1340,21 +1340,21 @@ static inline void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
}
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
- uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
tcg_insn_unit i1, i2;
TCGType rt = TCG_TYPE_I64;
TCGReg rd = TCG_REG_TMP;
uint64_t pair;
- ptrdiff_t offset = addr - jmp_addr;
+ ptrdiff_t offset = addr - jmp_rx;
if (offset == sextract64(offset, 0, 26)) {
i1 = I3206_B | ((offset >> 2) & 0x3ffffff);
i2 = NOP;
} else {
- offset = (addr >> 12) - (jmp_addr >> 12);
+ offset = (addr >> 12) - (jmp_rx >> 12);
/* patch ADRP */
i1 = I3406_ADRP | (offset & 3) << 29 | (offset & 0x1ffffc) << (5 - 2) | rd;
@@ -1362,8 +1362,8 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
}
pair = (uint64_t)i2 << 32 | i1;
- qatomic_set((uint64_t *)jmp_addr, pair);
- flush_idcache_range(jmp_addr, jmp_addr, 8);
+ qatomic_set((uint64_t *)jmp_rw, pair);
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
}
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
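
In the aarch64 backend the two-instruction slot (B+NOP or ADRP+ADD) is rewritten with a single 64-bit store, so a vCPU racing through the old code never observes a half-updated pair; the displacement math uses jmp_rx (what the CPU will fetch), the store goes through jmp_rw, and flush_idcache_range() is then given both addresses so the write made via the writable alias becomes visible to instruction fetch at the executable address. A quick stand-alone check of the packing order, with illustrative opcode values:

/*
 * Stand-alone check of the pair packing used above: the low 32 bits of the
 * 64-bit value hold i1, so on a little-endian host it lands at the lower
 * address and is the instruction fetched first.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint32_t i1 = 0x14000000;   /* AArch64 B with zero displacement (illustrative) */
    uint32_t i2 = 0xd503201f;   /* AArch64 NOP */
    uint64_t pair = (uint64_t)i2 << 32 | i1;

    uint32_t slot[2];
    memcpy(slot, &pair, sizeof(pair));        /* stands in for qatomic_set() */
    assert(slot[0] == i1 && slot[1] == i2);   /* holds on little-endian hosts */
    return 0;
}
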
@@ -2657,11 +2657,11 @@ static void tcg_target_init(TCGContext *s)
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
- uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
- qatomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
- flush_idcache_range(jmp_addr, jmp_addr, 4);
+ qatomic_set((uint32_t *)jmp_rw, deposit32(OPC_J, 0, 26, addr >> 2));
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
}
typedef struct {
@@ -1722,13 +1722,13 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
tcg_out32(s, insn);
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
- uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
if (TCG_TARGET_REG_BITS == 64) {
tcg_insn_unit i1, i2;
intptr_t tb_diff = addr - tc_ptr;
- intptr_t br_diff = addr - (jmp_addr + 4);
+ intptr_t br_diff = addr - (jmp_rx + 4);
uint64_t pair;
/* This does not exercise the range of the branch, but we do
@@ -1752,13 +1752,13 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
/* As per the enclosing if, this is ppc64. Avoid the _Static_assert
within qatomic_set that would fail to build a ppc32 host. */
- qatomic_set__nocheck((uint64_t *)jmp_addr, pair);
- flush_idcache_range(jmp_addr, jmp_addr, 8);
+ qatomic_set__nocheck((uint64_t *)jmp_rw, pair);
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
} else {
- intptr_t diff = addr - jmp_addr;
+ intptr_t diff = addr - jmp_rx;
tcg_debug_assert(in_range_b(diff));
- qatomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
- flush_idcache_range(jmp_addr, jmp_addr, 4);
+ qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
}
}
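
The ppc64 path stores the new two-instruction sequence with qatomic_set__nocheck() because, as the comment notes, plain qatomic_set() carries a compile-time width assertion that would trip when the same file is compiled for a ppc32 host, even though that branch can never run there. The sketch below reproduces the effect with a hypothetical checked_store() macro standing in for qatomic_set(): it builds on a 64-bit host and fails to build on a 32-bit one for exactly this reason.

/*
 * Illustration only: checked_store() is a hypothetical stand-in for
 * qatomic_set(); the point is that its _Static_assert is evaluated at
 * compile time even in a branch the optimizer will discard.
 */
#include <stdint.h>

#define checked_store(ptr, val) do {                                  \
        _Static_assert(sizeof(*(ptr)) <= sizeof(void *),              \
                       "store wider than a host register");           \
        *(volatile __typeof__(*(ptr)) *)(ptr) = (val);                \
    } while (0)

void patch_slot(uint64_t *pair, uint32_t *insn, int host_is_64bit)
{
    if (host_is_64bit) {
        /* On a 32-bit host this line alone breaks the build even if the
         * caller never takes this branch, hence the __nocheck variant in
         * the hunk above. */
        checked_store(pair, UINT64_C(0x6000000060000000));
    } else {
        checked_store(insn, 0x60000000u);   /* a single ppc nop, illustrative */
    }
}
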
@@ -1821,11 +1821,11 @@ void tcg_register_jit(const void *buf, size_t buf_size)
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
-void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
- uintptr_t addr)
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
+ uintptr_t jmp_rw, uintptr_t addr)
{
intptr_t tb_disp = addr - tc_ptr;
- intptr_t br_disp = addr - jmp_addr;
+ intptr_t br_disp = addr - jmp_rx;
tcg_insn_unit i1, i2;
/* We can reach the entire address space for ILP32.
@@ -1834,9 +1834,9 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
tcg_debug_assert(br_disp == (int32_t)br_disp);
if (!USE_REG_TB) {
- qatomic_set((uint32_t *)jmp_addr,
+ qatomic_set((uint32_t *)jmp_rw,
deposit32(CALL, 0, 30, br_disp >> 2));
- flush_idcache_range(jmp_addr, jmp_addr, 4);
+ flush_idcache_range(jmp_rx, jmp_rw, 4);
return;
}
@@ -1859,6 +1859,6 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
| INSN_IMM13((tb_disp & 0x3ff) | -0x400));
}
- qatomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
- flush_idcache_range(jmp_addr, jmp_addr, 8);
+ qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
}