diff --git a/tcg/arm/tcg-target-con-set.h b/tcg/arm/tcg-target-con-set.h
--- a/tcg/arm/tcg-target-con-set.h
+++ b/tcg/arm/tcg-target-con-set.h
@@ -12,19 +12,19 @@
C_O0_I1(r)
C_O0_I2(r, r)
C_O0_I2(r, rIN)
-C_O0_I2(s, s)
+C_O0_I2(q, q)
C_O0_I2(w, r)
-C_O0_I3(s, s, s)
-C_O0_I3(S, p, s)
+C_O0_I3(q, q, q)
+C_O0_I3(Q, p, q)
C_O0_I4(r, r, rI, rI)
-C_O0_I4(S, p, s, s)
-C_O1_I1(r, l)
+C_O0_I4(Q, p, q, q)
+C_O1_I1(r, q)
C_O1_I1(r, r)
C_O1_I1(w, r)
C_O1_I1(w, w)
C_O1_I1(w, wr)
C_O1_I2(r, 0, rZ)
-C_O1_I2(r, l, l)
+C_O1_I2(r, q, q)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rIK)
@@ -39,8 +39,8 @@ C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
C_O1_I4(r, r, r, rI, rI)
C_O1_I4(r, r, rIN, rIK, 0)
-C_O2_I1(e, p, l)
-C_O2_I2(e, p, l, l)
+C_O2_I1(e, p, q)
+C_O2_I2(e, p, q, q)
C_O2_I2(r, r, r, r)
C_O2_I4(r, r, r, r, rIN, rIK)
C_O2_I4(r, r, rI, rI, rIN, rIK)
diff --git a/tcg/arm/tcg-target-con-str.h b/tcg/arm/tcg-target-con-str.h
--- a/tcg/arm/tcg-target-con-str.h
+++ b/tcg/arm/tcg-target-con-str.h
@@ -10,9 +10,8 @@
*/
REGS('e', ALL_GENERAL_REGS & 0x5555) /* even regs */
REGS('r', ALL_GENERAL_REGS)
-REGS('l', ALL_QLOAD_REGS)
-REGS('s', ALL_QSTORE_REGS)
-REGS('S', ALL_QSTORE_REGS & 0x5555) /* even qstore */
+REGS('q', ALL_QLDST_REGS)
+REGS('Q', ALL_QLDST_REGS & 0x5555) /* even qldst */
REGS('w', ALL_VECTOR_REGS)

/*
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -353,23 +353,16 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define ALL_VECTOR_REGS 0xffff0000u

/*
- * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
- * and r0-r1 doing the byte swapping, so don't use these.
- * r3 is removed for softmmu to avoid clashes with helper arguments.
+ * r0-r3 will be overwritten when reading the tlb entry (softmmu only);
+ * r14 will be overwritten by the BLNE branching to the slow path.
*/
#ifdef CONFIG_SOFTMMU
-#define ALL_QLOAD_REGS \
+#define ALL_QLDST_REGS \
(ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
(1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
(1 << TCG_REG_R14)))
-#define ALL_QSTORE_REGS \
- (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
- (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
- ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
-#define ALL_QLOAD_REGS ALL_GENERAL_REGS
-#define ALL_QSTORE_REGS \
- (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
+#define ALL_QLDST_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R14))
#endif

/*
@@ -2203,13 +2196,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I4(r, r, r, rI, rI);

case INDEX_op_qemu_ld_i32:
- return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
+ return TARGET_LONG_BITS == 32 ? C_O1_I1(r, q) : C_O1_I2(r, q, q);
case INDEX_op_qemu_ld_i64:
- return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l);
+ return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, q) : C_O2_I2(e, p, q, q);
case INDEX_op_qemu_st_i32:
- return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
+ return TARGET_LONG_BITS == 32 ? C_O0_I2(q, q) : C_O0_I3(q, q, q);
case INDEX_op_qemu_st_i64:
- return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s);
+ return TARGET_LONG_BITS == 32 ? C_O0_I3(Q, p, q) : C_O0_I4(Q, p, q, q);

case INDEX_op_st_vec:
return C_O0_I2(w, r);
Always reserve r3 for tlb softmmu lookup.  Fix a bug in user-only
ALL_QLDST_REGS, in that r14 is clobbered by the BLNE that leads to
the misaligned trap.  Remove r0+r1 from user-only ALL_QLDST_REGS;
I believe these had been reserved for bswap, which we no longer
perform during qemu_st.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target-con-set.h | 16 ++++++++--------
 tcg/arm/tcg-target-con-str.h |  5 ++---
 tcg/arm/tcg-target.c.inc     | 23 ++++++++---------------
 3 files changed, 18 insertions(+), 26 deletions(-)
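
For reference, a minimal standalone sketch (not part of the patch) of how
the new constraint masks work out numerically.  It assumes the arm
backend's usual encoding, TCG_REG_Rn == n with ALL_GENERAL_REGS covering
the low 16 bits of the register mask; the harness itself is hypothetical:

/*
 * Hypothetical harness: recompute the ALL_QLDST_REGS masks from the
 * patch, assuming TCG_REG_Rn == n and 16 general registers.
 */
#include <stdio.h>

#define ALL_GENERAL_REGS  0xffffu
#define TCG_REG_R0   0
#define TCG_REG_R1   1
#define TCG_REG_R2   2
#define TCG_REG_R3   3
#define TCG_REG_R14  14

int main(void)
{
    /* softmmu: r0-r3 clobbered by the tlb lookup, r14 by the BLNE. */
    unsigned softmmu = ALL_GENERAL_REGS &
        ~((1u << TCG_REG_R0) | (1u << TCG_REG_R1) |
          (1u << TCG_REG_R2) | (1u << TCG_REG_R3) |
          (1u << TCG_REG_R14));

    /* user-only: only r14, clobbered by the BLNE to the slow path. */
    unsigned user = ALL_GENERAL_REGS & ~(1u << TCG_REG_R14);

    printf("softmmu   'q' mask = 0x%04x\n", softmmu);           /* 0xbff0 */
    printf("user-only 'q' mask = 0x%04x\n", user);              /* 0xbfff */

    /* 'Q' (like 'e') keeps only even regs, for 64-bit register pairs. */
    printf("softmmu   'Q' mask = 0x%04x\n", softmmu & 0x5555u); /* 0x1550 */
    return 0;
}

Note that with r3 now unconditionally excluded under softmmu, the mask no
longer depends on TARGET_LONG_BITS, which is what lets the separate
load ('l') and store ('s'/'S') constraints collapse into the single
'q'/'Q' pair above.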