@@ -1886,8 +1886,8 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
* Record the context of a call to the out of line helper code for the slow path
* for a load or store, so that we can later generate the correct helper code
*/
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
- MemOpIdx oi,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld,
+ TCGType type, MemOpIdx oi,
TCGReg datalo, TCGReg datahi,
TCGReg addrlo, TCGReg addrhi,
tcg_insn_unit *raddr,
@@ -1897,7 +1897,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64,
 
label->is_ld = is_ld;
label->oi = oi;
- label->type = is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ label->type = type;
label->datalo_reg = datalo;
label->datahi_reg = datahi;
label->addrlo_reg = addrlo;
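
These first two hunks widen the boolean is_64 into a full TCGType: the caller now states the type once and the slow-path label records it verbatim instead of re-deriving it. For the two values the old flag could express, the mapping is unchanged; a sketch of the correspondence, with the enum names taken from the TCGType definition:

    /* old flag         new explicit argument
     * is_64 == false   TCG_TYPE_I32
     * is_64 == true    TCG_TYPE_I64
     * Anything wider was inexpressible before; passing TCGType keeps
     * add_qemu_ldst_label() agnostic about the data width.
     */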
@@ -2154,11 +2154,10 @@ static inline int setup_guest_base_seg(void)
 
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
TCGReg base, int index, intptr_t ofs,
- int seg, bool is64, MemOp memop)
+ int seg, TCGType type, MemOp memop)
{
- TCGType type = is64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
bool use_movbe = false;
- int rexw = is64 * P_REXW;
+ int rexw = (type == TCG_TYPE_I32 ? 0 : P_REXW);
int movop = OPC_MOVL_GvEv;
 
/* Do big-endian loads with movbe. */
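
The rexw change is behavior-preserving for the two cases the bool could express: REX.W selects the 64-bit operand size, and the old bool multiply yielded either 0 or P_REXW. It also generalizes, since any type other than TCG_TYPE_I32 now selects P_REXW, which a 0/1 multiply could not express. A compilable sketch of the equivalence; the P_REXW value here is illustrative, not the backend's actual encoding constant:

    #include <assert.h>

    #define P_REXW 0x1000   /* illustrative; stands in for the backend flag */

    int main(void)
    {
        for (int is64 = 0; is64 <= 1; is64++) {
            int old_rexw = is64 * P_REXW;             /* bool arithmetic */
            int new_rexw = (is64 == 0 ? 0 : P_REXW);  /* type != TCG_TYPE_I32 */
            assert(old_rexw == new_rexw);
        }
        return 0;
    }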
@@ -2248,50 +2247,34 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
 
-/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
- EAX. It will be useful once fixed registers globals are less
- common. */
-static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg datalo, datahi, addrlo;
- TCGReg addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
+ MemOp opc = get_memop(oi);
+
#if defined(CONFIG_SOFTMMU)
- int mem_index;
tcg_insn_unit *label_ptr[2];
-#else
- unsigned a_bits;
-#endif
-
- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
- mem_index = get_mmuidx(oi);
-
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+ tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
label_ptr, offsetof(CPUTLBEntry, addr_read));
 
/* TLB Hit. */
- tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, is64, opc);
+ tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1,
+ -1, 0, 0, data_type, opc);
 
/* Record the current context of a load into ldst label */
- add_qemu_ldst_label(s, true, is64, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, true, data_type, oi, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
+
if (a_bits) {
tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
}
+
tcg_out_qemu_ld_direct(s, datalo, datahi, addrlo, x86_guest_base_index,
x86_guest_base_offset, x86_guest_base_seg,
- is64, opc);
+ data_type, opc);
#endif
}
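
Beyond the signature change, the body above replaces positional *args++ decoding with explicit parameters. For reference, the order the deleted code consumed, reconstructed from the removed lines; slots that do not apply are now passed as -1 by the call sites in the final hunk:

    /* datalo
     * datahi -- present only when TCG_TARGET_REG_BITS == 32 && is64
     * addrlo
     * addrhi -- present only when TARGET_LONG_BITS > TCG_TARGET_REG_BITS
     * oi     -- the MemOpIdx, from which memop and the mmu index derive
     */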
@@ -2347,40 +2330,26 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
}
}
 
-static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
+static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addrlo, TCGReg addrhi,
+ MemOpIdx oi, TCGType data_type)
{
- TCGReg datalo, datahi, addrlo;
- TCGReg addrhi __attribute__((unused));
- MemOpIdx oi;
- MemOp opc;
+ MemOp opc = get_memop(oi);
+
#if defined(CONFIG_SOFTMMU)
- int mem_index;
tcg_insn_unit *label_ptr[2];
-#else
- unsigned a_bits;
-#endif
-
- datalo = *args++;
- datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
- addrlo = *args++;
- addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
- oi = *args++;
- opc = get_memop(oi);
-
-#if defined(CONFIG_SOFTMMU)
- mem_index = get_mmuidx(oi);
-
- tcg_out_tlb_load(s, addrlo, addrhi, mem_index, opc,
+ tcg_out_tlb_load(s, addrlo, addrhi, get_mmuidx(oi), opc,
label_ptr, offsetof(CPUTLBEntry, addr_write));
 
/* TLB Hit. */
tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, -1, 0, 0, opc);
 
/* Record the current context of a store into ldst label */
- add_qemu_ldst_label(s, false, is64, oi, datalo, datahi, addrlo, addrhi,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, data_type, oi, datalo, datahi,
+ addrlo, addrhi, s->code_ptr, label_ptr);
#else
- a_bits = get_alignment_bits(opc);
+ unsigned a_bits = get_alignment_bits(opc);
+
if (a_bits) {
tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
}
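
The store path mirrors the load path, with one asymmetry worth noting: tcg_out_qemu_st_direct takes no TCGType, because a store's width comes entirely from the MemOp and there is no result register to size or extend. The two direct-path calls as they stand after this patch (parameter names as in the hunks above):

    tcg_out_qemu_ld_direct(s, datalo, datahi, base, index, ofs, seg,
                           type, memop);  /* type sizes/extends the result */
    tcg_out_qemu_st_direct(s, datalo, datahi, base, index, ofs, seg,
                           memop);        /* width taken from memop alone */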
@@ -2675,17 +2644,37 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
 
case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, args, 0);
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_ld(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, args, 1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_qemu_ld(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ } else if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+ }
break;
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
- tcg_out_qemu_st(s, args, 0);
+ if (TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
+ tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I32);
+ } else {
+ tcg_out_qemu_st(s, a0, -1, a1, a2, args[3], TCG_TYPE_I32);
+ }
break;
case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, args, 1);
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_qemu_st(s, a0, -1, a1, -1, a2, TCG_TYPE_I64);
+ } else if (TARGET_LONG_BITS == 32) {
+ tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I64);
+ } else {
+ tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
+ }
break;
 
OP_32_64(mulu2):
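
Taken together, the dispatch above encodes the following operand layouts, assuming the usual a0..a2 aliases for args[0..2] that tcg_out_op establishes earlier (outside this excerpt):

    /*                                   data        addr        oi
     * 64-bit host, any guest:           a0          a1          a2
     * 32-bit host, 32-bit guest addr:
     *   qemu_ld/st_i32                  a0          a1          a2
     *   qemu_ld/st_i64                  a0,a1       a2          args[3]
     * 32-bit host, 64-bit guest addr:
     *   qemu_ld/st_i32                  a0          a1,a2       args[3]
     *   qemu_ld/st_i64                  a0,a1       a2,args[3]  args[4]
     */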