@@ -1772,7 +1772,7 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
* Record the context of a call to the out-of-line helper code for the slow path
* for a load or store, so that we can later generate the correct helper code
*/
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGType type,
MemOpIdx oi,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
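
(Reviewer context: the record built here is a TCGLabelQemuLdst. The slow-path emitters, tcg_out_qemu_ld_slow_path() and tcg_out_qemu_st_slow_path() in this backend though not shown in this excerpt, consume it later to generate the out-of-line helper call and to patch the forward branch left behind by the TLB check. Widening is_64 into a full TCGType keeps that record exact instead of collapsing it to a two-way flag.)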
@@ -1783,7 +1783,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,

label->is_ld = is_ld;
label->oi = oi;
- label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ label->type = type;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
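
The first two hunks are the heart of the change: the label used to reconstruct its type from a bool, which is lossless only while exactly two data types can ever reach this code. A standalone toy program (not QEMU code; all names invented) shows how a third type would collapse through the bool:

    #include <assert.h>

    /* With two types a bool round-trips; with three it cannot. */
    typedef enum { T_I32, T_I64, T_I128 } T;

    static int to_bool(T t) { return t != T_I32; }
    static T from_bool(int is_64) { return is_64 ? T_I64 : T_I32; }

    int main(void)
    {
        assert(from_bool(to_bool(T_I64)) == T_I64);   /* fine */
        assert(from_bool(to_bool(T_I128)) == T_I64);  /* lossy: I128 came back as I64 */
        return 0;
    }

Passing the caller's TCGType straight through avoids that trap, presumably as groundwork for wider memory operations later.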
@@ -2124,10 +2124,10 @@ static inline int setup_guest_base_seg(void)

static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
- int seg, bool is64, MemOp memop)
+ int seg, TCGType type, MemOp memop)
{
bool use_movbe = false;
- int rexw = is64 * P_REXW;
+ int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
int movop = OPC_MOVL_GvEv;

/* Do big-endian loads with movbe. */
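
The same theme applies to the REX.W computation: is64 * P_REXW leaned on the C guarantee that a bool converts to exactly 0 or 1. A TCGType's numeric value is an enum detail, so the multiplication trick would survive only by coincidence of numbering; the new comparison is explicit and reads as "everything except I32 takes REX.W", which is also the sane default should wider types ever reach this function. A minimal sketch of the hazard (standalone C; both values invented for illustration):

    #include <stdio.h>

    enum { P_REXW = 0x1000 };            /* invented flag value */
    enum { TYPE_I32 = 1, TYPE_I64 = 2 }; /* invented enum numbering */

    int main(void)
    {
        printf("%#x\n", TYPE_I64 * P_REXW);                 /* 0x2000: stray bit */
        printf("%#x\n", TYPE_I64 == TYPE_I32 ? 0 : P_REXW); /* 0x1000: intended */
        return 0;
    }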
@@ -2220,7 +2220,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
EAX. This will be useful once fixed-register globals are less
common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
@@ -2232,7 +2232,16 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
#endif

datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ switch (type) {
+ case TCG_TYPE_I32:
+ datahi = 0;
+ break;
+ case TCG_TYPE_I64:
+ datahi = (TCG_TARGET_REG_BITS == 32 ? *args++ : 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
oi = *args++;
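
The switch spells out what the old one-liner packed into a bool test: 64-bit data on a 32-bit host lives in a register pair, so only that combination consumes a second data argument, and any future type that is neither I32 nor I64 now fails hard in g_assert_not_reached() instead of silently decoding as 32-bit. For reference, the TCGArg layout implied by this decode (reconstructed from the code above, not from documentation):

    datalo [, datahi]    datahi only for 64-bit data on a 32-bit host
    addrlo [, addrhi]    addrhi only when TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    oi                   the MemOpIdx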
@@ -2243,10 +2252,10 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
label_ptr, offsetof(CPUTLBEntry, addr_read));

/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+    tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, type, opc);

/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, true, type, oi, datalo, datahi,
TCG_REG_L1, addrhi, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
@@ -2255,9 +2264,9 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
}
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
- is64, opc);
+ type, opc);
if (a_bits) {
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, true, type, oi, datalo, datahi,
addrlo, addrhi, s->code_ptr, label_ptr);
}
#endif
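
Worth noting for the user-only (#else) branch: there is no TLB here, so the ldst label machinery is reused purely for the alignment check, and a label is recorded only when a_bits is non-zero, that is, when a slow path exists at all. The tcg_out_test_alignment() call that plants the patched branch sits in the context elided between these hunks.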
@@ -2315,7 +2324,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}

-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGType type)
{
TCGReg datalo, datahi, addrlo;
TCGReg addrhi __attribute__((unused));
@@ -2327,7 +2336,16 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
#endif

datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
+ switch (type) {
+ case TCG_TYPE_I32:
+ datahi = 0;
+ break;
+ case TCG_TYPE_I64:
+ datahi = (TCG_TARGET_REG_BITS == 32 ? *args++ : 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
addrlo = *args++;
addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
oi = *args++;
@@ -2341,7 +2359,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);

/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, false, type, oi, datalo, datahi,
TCG_REG_L1, addrhi, s->code_ptr, label_ptr);
#else
a_bits = get_alignment_bits(opc);
@@ -2351,7 +2369,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
tcg_out_qemu_st_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg, opc);
if (a_bits) {
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi,
+ add_qemu_ldst_label(s, false, type, oi, datalo, datahi,
addrlo, addrhi, s->code_ptr, label_ptr);
}
#endif
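
Note the asymmetry with the load side: tcg_out_qemu_st_direct() keeps its old signature and takes no TCGType. A store is fully described by the operand size in the MemOp, while a load must additionally know the width of the destination register so it can zero- or sign-extend into it; that is why only the load path threads the type down to its _direct helper.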
@@ -2655,17 +2673,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;

case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, 0);
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, 1);
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
- tcg_out_qemu_st(s, args, 0);
+ tcg_out_qemu_st(s, args, TCG_TYPE_I32);
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, 1);
+ tcg_out_qemu_st(s, args, TCG_TYPE_I64);
break;

OP_32_64(mulu2):
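
With the dispatch in tcg_out_op() now naming the type at each call site (note INDEX_op_qemu_st8_i32 sharing the I32 store path), a later widening becomes mechanical. A hypothetical follow-up case, not part of this patch and with both names invented here (no INDEX_op_qemu_ld_i128 or TCG_TYPE_I128 exists at this point):

    case INDEX_op_qemu_ld_i128:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I128);
        break;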