Add opcodes for backend support for 128-bit memory operations.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/tcg/tcg-opc.h        |  8 +++++
 tcg/aarch64/tcg-target.h     |  2 ++
 tcg/arm/tcg-target.h         |  2 ++
 tcg/i386/tcg-target.h        |  2 ++
 tcg/loongarch64/tcg-target.h |  2 ++
 tcg/mips/tcg-target.h        |  2 ++
 tcg/ppc/tcg-target.h         |  2 ++
 tcg/riscv/tcg-target.h       |  2 ++
 tcg/s390x/tcg-target.h       |  2 ++
 tcg/sparc64/tcg-target.h     |  2 ++
 tcg/tci/tcg-target.h         |  2 ++
 tcg/tcg-op.c                 | 67 ++++++++++++++++++++++++++++++++----
 tcg/tcg.c                    |  4 +++
 tcg/README                   | 10 ++++--
 14 files changed, 100 insertions(+), 9 deletions(-)

--- a/include/tcg/tcg-opc.h
+++ b/include/tcg/tcg-opc.h
@@ -213,6 +213,14 @@ DEF(qemu_st8_i32, 0, TLADDR_ARGS + 1, 1,
TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS |
IMPL(TCG_TARGET_HAS_qemu_st8_i32))
+/* Only for 64-bit hosts at the moment. */
+DEF(qemu_ld_i128, 2, 1, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+DEF(qemu_st_i128, 0, 3, 1,
+ TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT |
+ IMPL(TCG_TARGET_HAS_qemu_ldst_i128))
+
/* Host vector support. */
#define IMPLVEC TCG_OPF_VECTOR | IMPL(TCG_TARGET_MAYBE_vec)
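The new opcodes follow the DEF(name, outputs, inputs, constants) convention: on a 64-bit host the load produces two 64-bit outputs (the value halves) from one address input plus a MemOpIdx constant, and the store consumes three inputs. As a sketch of how a backend's tcg_out_op() switch might decode this layout (a fragment only; the tcg_out_qemu_ld128/st128 helper names are hypothetical, not part of this patch):

    case INDEX_op_qemu_ld_i128:
        /* args[0], args[1]: lo/hi halves of the destination;
           args[2]: guest address; args[3]: MemOpIdx constant. */
        tcg_out_qemu_ld128(s, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_qemu_st_i128:
        /* args[0], args[1]: lo/hi halves of the source value. */
        tcg_out_qemu_st128(s, args[0], args[1], args[2], args[3]);
        break;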
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -130,6 +130,8 @@ extern bool have_lse2;
#define TCG_TARGET_HAS_mulsh_i64 1
#define TCG_TARGET_HAS_direct_jump 1
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
#define TCG_TARGET_HAS_v64 1
#define TCG_TARGET_HAS_v128 1
#define TCG_TARGET_HAS_v256 0
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -126,6 +126,8 @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_direct_jump 0
#define TCG_TARGET_HAS_qemu_st8_i32 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
#define TCG_TARGET_HAS_v64 use_neon_instructions
#define TCG_TARGET_HAS_v128 use_neon_instructions
#define TCG_TARGET_HAS_v256 0
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -195,6 +195,8 @@ extern bool have_atomic16;
#define TCG_TARGET_HAS_qemu_st8_i32 1
#endif
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
/* We do not support older SSE systems, only beginning with AVX1. */
#define TCG_TARGET_HAS_v64 have_avx1
#define TCG_TARGET_HAS_v128 have_avx1
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -173,6 +173,8 @@ typedef enum {
#define TCG_TARGET_HAS_muluh_i64 1
#define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_DEFAULT_MO (0)
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -204,6 +204,8 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_ext16u_i64 0 /* andi rt, rs, 0xffff */
#endif
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -151,6 +151,8 @@ extern bool have_vsx;
#define TCG_TARGET_HAS_mulsh_i64 1
#endif
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
/*
* While technically Altivec could support V64, it has no 64-bit store
* instruction and substituting two 32-bit stores makes the generated
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -168,6 +168,8 @@ typedef enum {
#define TCG_TARGET_HAS_mulsh_i64 1
#endif
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
/* not defined -- call should be eliminated at compile time */
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -139,6 +139,8 @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_muluh_i64 0
#define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v256 0
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -152,6 +152,8 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
#define TCG_TARGET_HAS_mulsh_i64 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
#define TCG_AREG0 TCG_REG_I0
#define TCG_TARGET_DEFAULT_MO (0)
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -128,6 +128,8 @@
#define TCG_TARGET_HAS_mulu2_i32 1
#endif /* TCG_TARGET_REG_BITS == 64 */
+#define TCG_TARGET_HAS_qemu_ldst_i128 0
+
/* Number of registers available. */
#define TCG_TARGET_NB_REGS 16
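Every backend starts with the feature disabled. A backend that later implements the two opcodes would flip its define, plausibly gated on a runtime capability check; the following is purely illustrative, reusing the have_atomic16 flag declared in the i386 header above:

    /* Hypothetical future enablement; not part of this patch. */
    #define TCG_TARGET_HAS_qemu_ldst_i128  have_atomic16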
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -3201,7 +3201,7 @@ static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
{
- MemOpIdx oi = make_memop_idx(memop, idx);
+ const MemOpIdx oi = make_memop_idx(memop, idx);
tcg_debug_assert((memop & MO_SIZE) == MO_128);
tcg_debug_assert((memop & MO_SIGN) == 0);
@@ -3209,9 +3209,35 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
addr = plugin_prep_mem_callbacks(addr);
- /* TODO: allow the tcg backend to see the whole operation. */
+ /* TODO: For now, force 32-bit hosts to use the helper. */
+ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+ TCGv_i64 lo, hi;
+ TCGArg addr_arg;
+ MemOpIdx adj_oi;
- if (use_two_i64_for_i128(memop)) {
+ /* TODO: Make TCG_TARGET_HAS_MEMORY_BSWAP fine grained. */
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ lo = TCGV128_HIGH(val);
+ hi = TCGV128_LOW(val);
+ adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx);
+ } else {
+ lo = TCGV128_LOW(val);
+ hi = TCGV128_HIGH(val);
+ adj_oi = oi;
+ }
+
+#if TARGET_LONG_BITS == 32
+ addr_arg = tcgv_i32_arg(addr);
+#else
+ addr_arg = tcgv_i64_arg(addr);
+#endif
+ tcg_gen_op4ii_i64(INDEX_op_qemu_ld_i128, lo, hi, addr_arg, adj_oi);
+
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ tcg_gen_bswap64_i64(lo, lo);
+ tcg_gen_bswap64_i64(hi, hi);
+ }
+ } else if (use_two_i64_for_i128(memop)) {
MemOp mop[2];
TCGv addr_p8;
TCGv_i64 x, y;
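The byte-swap fallback above leans on a simple identity: a 128-bit byte swap equals exchanging the two 64-bit halves and byte-swapping each half. A self-contained C model of that identity (assumes a compiler providing __builtin_bswap64; the type and function names are illustrative, not TCG API):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } U128Parts;

    /* Full 128-bit byte swap built from two 64-bit swaps. */
    static U128Parts bswap128(U128Parts x)
    {
        U128Parts r;
        r.lo = __builtin_bswap64(x.hi);  /* old high half becomes new low half */
        r.hi = __builtin_bswap64(x.lo);  /* old low half becomes new high half */
        return r;
    }

This is why the load path binds lo/hi to the opposite halves of val before emitting the host-order load, then swaps each half in place.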
@@ -3254,7 +3280,7 @@ void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
{
- MemOpIdx oi = make_memop_idx(memop, idx);
+ const MemOpIdx oi = make_memop_idx(memop, idx);
tcg_debug_assert((memop & MO_SIZE) == MO_128);
tcg_debug_assert((memop & MO_SIGN) == 0);
@@ -3262,9 +3288,38 @@ void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop)
tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
addr = plugin_prep_mem_callbacks(addr);
- /* TODO: allow the tcg backend to see the whole operation. */
+ /* TODO: For now, force 32-bit hosts to use the helper. */
- if (use_two_i64_for_i128(memop)) {
+ if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
+ TCGv_i64 lo, hi;
+ TCGArg addr_arg;
+ MemOpIdx adj_oi;
+
+ /* TODO: Make TCG_TARGET_HAS_MEMORY_BSWAP fine grained. */
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ lo = tcg_temp_new_i64();
+ hi = tcg_temp_new_i64();
+ tcg_gen_bswap64_i64(lo, TCGV128_HIGH(val));
+ tcg_gen_bswap64_i64(hi, TCGV128_LOW(val));
+ adj_oi = make_memop_idx(memop & ~MO_BSWAP, idx);
+ } else {
+ lo = TCGV128_LOW(val);
+ hi = TCGV128_HIGH(val);
+ adj_oi = oi;
+ }
+
+#if TARGET_LONG_BITS == 32
+ addr_arg = tcgv_i32_arg(addr);
+#else
+ addr_arg = tcgv_i64_arg(addr);
+#endif
+ tcg_gen_op4ii_i64(INDEX_op_qemu_st_i128, lo, hi, addr_arg, adj_oi);
+
+ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(hi);
+ }
+ } else if (use_two_i64_for_i128(memop)) {
MemOp mop[2];
TCGv addr_p8;
TCGv_i64 x, y;
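The store path applies the same identity in reverse, but byte-swaps into fresh temporaries so the value in val is left unmodified. A plain-C model of the effect (assumes a little-endian host and __builtin_bswap64; names are illustrative):

    #include <stdint.h>
    #include <string.h>

    /* Store v_lo/v_hi (the value's little-endian halves) big-endian at p. */
    static void store_be128(uint8_t *p, uint64_t v_lo, uint64_t v_hi)
    {
        uint64_t lo = __builtin_bswap64(v_hi);  /* scratch; inputs untouched */
        uint64_t hi = __builtin_bswap64(v_lo);
        memcpy(p, &lo, 8);                      /* host-order store of each half */
        memcpy(p + 8, &hi, 8);
    }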
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1497,6 +1497,10 @@ bool tcg_op_supported(TCGOpcode op)
case INDEX_op_qemu_st8_i32:
return TCG_TARGET_HAS_qemu_st8_i32;
+ case INDEX_op_qemu_ld_i128:
+ case INDEX_op_qemu_st_i128:
+ return TCG_TARGET_HAS_qemu_ldst_i128;
+
case INDEX_op_mov_i32:
case INDEX_op_setcond_i32:
case INDEX_op_brcond_i32:
--- a/tcg/README
+++ b/tcg/README
@@ -512,8 +512,8 @@ jump to the TCG epilogue to go back to the exec loop.
This operation is optional. If the TCG backend does not implement the
goto_ptr opcode, emitting this op is equivalent to emitting exit_tb(0).
-* qemu_ld_i32/i64 t0, t1, flags, memidx
-* qemu_st_i32/i64 t0, t1, flags, memidx
+* qemu_ld_i32/i64/i128 t0, t1, flags, memidx
+* qemu_st_i32/i64/i128 t0, t1, flags, memidx
* qemu_st8_i32 t0, t1, flags, memidx
Load data at the guest address t1 into t0, or store data in t0 at guest
@@ -522,7 +522,8 @@ register t0 only. The address t1 is always sized according to the guest,
and the width of the memory operation is controlled by flags.
Both t0 and t1 may be split into little-endian ordered pairs of registers
-if dealing with 64-bit quantities on a 32-bit host.
+if dealing with 64-bit quantities on a 32-bit host, or 128-bit quantities
+on a 64-bit host.
The memidx selects the qemu tlb index to use (e.g. user or kernel access).
The flags are the MemOp bits, selecting the sign, width, and endianness
@@ -531,6 +532,9 @@ of the memory access.
For a 32-bit host, qemu_ld/st_i64 is guaranteed to only be used with a
64-bit memory access specified in flags.
+The qemu_ld/st_i128 opcodes are only supported for a 64-bit host, and are
+guaranteed to be used with the host memory ordering.
+
For i386, qemu_st8_i32 is exactly like qemu_st_i32, except the size of
the memory operation is known to be 8-bit. This allows the backend to
provide a different set of register constraints.
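For reference, frontends reach these opcodes through the existing high-level helpers rather than emitting them directly. A minimal sketch (mem_idx and the MemOp flags are illustrative; addr comes from the translator):

    TCGv_i128 t = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(t, addr, mem_idx, MO_128 | MO_LE | MO_ALIGN);
    /* ... compute with the halves via tcg_gen_extr_i128_i64() ... */
    tcg_gen_qemu_st_i128(t, addr, mem_idx, MO_128 | MO_LE | MO_ALIGN);

Whether the new backend path or the use_two_i64_for_i128() fallback is taken is then entirely internal to tcg-op.c.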