@@ -1900,7 +1900,6 @@ static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
}

/* asi moves */
-#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
typedef enum {
GET_ASI_HELPER,
GET_ASI_EXCP,
@@ -2149,8 +2148,22 @@ static DisasASI get_asi(DisasContext *dc, int insn, MemOp memop)
return resolve_asi(dc, asi, memop);
}

-static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
- int insn, MemOp memop)
+#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
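+/* These helpers do not exist for the sparc32 user-only build; provide
+   stubs so the now-shared code below still compiles.  The calls must
+   never be reached in that configuration, hence g_assert_not_reached().  */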
+static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
+ TCGv_i32 asi, TCGv_i32 mop)
+{
+ g_assert_not_reached();
+}
+
+static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
+ TCGv_i32 asi, TCGv_i32 mop)
+{
+ g_assert_not_reached();
+}
+#endif
+
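+/* With the configuration ifdefs gone these functions are built for all
+   targets; the unused attribute keeps -Wunused-function quiet in any
+   build where a particular function currently has no caller.  */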
+static void __attribute__((unused))
+gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn, MemOp memop)
{
DisasASI da = get_asi(dc, insn, memop);

@@ -2184,8 +2197,8 @@ static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
}
}

-static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
- int insn, MemOp memop)
+static void __attribute__((unused))
+gen_st_asi(DisasContext *dc, TCGv src, TCGv addr, int insn, MemOp memop)
{
DisasASI da = get_asi(dc, insn, memop);

@@ -2260,8 +2273,8 @@ static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
}
}

-static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
- TCGv addr, int insn)
+static void __attribute__((unused))
+gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src, TCGv addr, int insn)
{
DisasASI da = get_asi(dc, insn, MO_TEUL);

@@ -2278,8 +2291,8 @@ static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
}
}

-static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
+static void __attribute__((unused))
+gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
{
DisasASI da = get_asi(dc, insn, MO_TEUL);
TCGv oldv;
@@ -2300,7 +2313,8 @@ static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
}
}

-static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+static void __attribute__((unused))
+gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
{
DisasASI da = get_asi(dc, insn, MO_UB);

@@ -2335,11 +2349,9 @@ static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
break;
}
}

-#endif
-#ifdef TARGET_SPARC64
-static void gen_ldf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void __attribute__((unused))
+gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
@@ -2447,8 +2459,8 @@ static void gen_ldf_asi(DisasContext *dc, TCGv addr,
}
}

-static void gen_stf_asi(DisasContext *dc, TCGv addr,
- int insn, int size, int rd)
+static void __attribute__((unused))
+gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
@@ -2530,21 +2542,23 @@ static void gen_stf_asi(DisasContext *dc, TCGv addr,
}
}

-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+static void __attribute__((unused))
+gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv_i64 hi = gen_dest_gpr(dc, rd);
- TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+ TCGv hi = gen_dest_gpr(dc, rd);
+ TCGv lo = gen_dest_gpr(dc, rd + 1);

switch (da.type) {
case GET_ASI_EXCP:
return;

case GET_ASI_DTWINX:
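+ /* Twin doubleword loads are a sparc64-only ASI, where TCGv is
+ 64 bits wide, so the _tl loads below are full 64-bit accesses.  */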
+ assert(TARGET_LONG_BITS == 64);
gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_qemu_ld_tl(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_ld_tl(lo, addr, da.mem_idx, da.memop);
break;

case GET_ASI_DIRECT:
@@ -2558,9 +2572,9 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
result is byte swapped. Having just performed one
64-bit bswap, we need now to swap the writebacks. */
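+ /* extr_i64_tl expands to extr32_i64 on sparc64 and to extr_i64_i32
+ on sparc32, which lets this one implementation serve both.  */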
if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
@@ -2580,9 +2594,9 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)

/* See above. */
if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_extr32_i64(lo, hi, tmp);
+ tcg_gen_extr_i64_tl(lo, hi, tmp);
} else {
- tcg_gen_extr32_i64(hi, lo, tmp);
+ tcg_gen_extr_i64_tl(hi, lo, tmp);
}
}
break;
@@ -2592,8 +2606,8 @@ static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
gen_store_gpr(dc, rd + 1, lo);
}

-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
+static void __attribute__((unused))
+gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr, int insn, int rd)
{
DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv lo = gen_load_gpr(dc, rd + 1);
@@ -2603,10 +2617,11 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
break;

case GET_ASI_DTWINX:
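+ /* As in gen_ldda_asi: twin stores exist only on sparc64, so the
+ _tl stores below are guaranteed to be 64-bit accesses.  */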
+ assert(TARGET_LONG_BITS == 64);
gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_qemu_st_tl(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_tl(lo, addr, da.mem_idx, da.memop);
break;

case GET_ASI_DIRECT:
@@ -2617,15 +2632,37 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
byte swapped. We will perform one 64-bit LE store, so now
we must swap the order of the construction. */
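+ /* concat_tl_i64 likewise maps to concat32_i64 on sparc64 and to
+ concat_i32_i64 on sparc32; see the matching note in gen_ldda_asi.  */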
if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ tcg_gen_concat_tl_i64(t64, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_tl_i64(t64, hi, lo);
}
gen_address_mask(dc, addr);
tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
}
break;

+ case GET_ASI_BFILL:
+ assert(TARGET_LONG_BITS == 32);
+ /* Store 32 bytes of T64 to ADDR. */
+ /* ??? The original qemu code suggests 8-byte alignment, dropping
+ the low bits, but the only place I can see this used is in the
+ Linux kernel with 32 byte alignment, which would make more sense
+ as a cacheline-style operation. */
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ TCGv d_addr = tcg_temp_new();
+ TCGv eight = tcg_constant_tl(8);
+ int i;
+
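+ /* Build the doubleword once, round the address down to an 8-byte
+ boundary, then emit four 8-byte stores to fill the 32-byte block.  */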
+ tcg_gen_concat_tl_i64(t64, lo, hi);
+ tcg_gen_andi_tl(d_addr, addr, -8);
+ for (i = 0; i < 32; i += 8) {
+ tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+ tcg_gen_add_tl(d_addr, d_addr, eight);
+ }
+ }
+ break;
+
default:
/* ??? In theory we've handled all of the ASIs that are valid
for stda, and this should raise DAE_invalid_asi. */
@@ -2636,9 +2673,9 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,

/* See above. */
if ((da.memop & MO_BSWAP) == MO_TE) {
- tcg_gen_concat32_i64(t64, lo, hi);
+ tcg_gen_concat_tl_i64(t64, lo, hi);
} else {
- tcg_gen_concat32_i64(t64, hi, lo);
+ tcg_gen_concat_tl_i64(t64, hi, lo);
}

save_state(dc);
@@ -2648,8 +2685,8 @@ static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
}
}

-static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
- int insn, int rd)
+static void __attribute__((unused))
+gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv, int insn, int rd)
{
DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv oldv;
@@ -2670,88 +2707,6 @@ static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
}
}
-#elif !defined(CONFIG_USER_ONLY)
-static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
-{
- /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
- whereby "rd + 1" elicits "error: array subscript is above array".
- Since we have already asserted that rd is even, the semantics
- are unchanged. */
- TCGv lo = gen_dest_gpr(dc, rd | 1);
- TCGv hi = gen_dest_gpr(dc, rd);
- TCGv_i64 t64 = tcg_temp_new_i64();
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- return;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
- default:
- {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
-
- save_state(dc);
- gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
- }
- break;
- }
-
- tcg_gen_extr_i64_i32(lo, hi, t64);
- gen_store_gpr(dc, rd | 1, lo);
- gen_store_gpr(dc, rd, hi);
-}
-
-static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
- int insn, int rd)
-{
- DisasASI da = get_asi(dc, insn, MO_TEUQ);
- TCGv lo = gen_load_gpr(dc, rd + 1);
- TCGv_i64 t64 = tcg_temp_new_i64();
-
- tcg_gen_concat_tl_i64(t64, lo, hi);
-
- switch (da.type) {
- case GET_ASI_EXCP:
- break;
- case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop | MO_ALIGN);
- break;
- case GET_ASI_BFILL:
- /* Store 32 bytes of T64 to ADDR. */
- /* ??? The original qemu code suggests 8-byte alignment, dropping
- the low bits, but the only place I can see this used is in the
- Linux kernel with 32 byte alignment, which would make more sense
- as a cacheline-style operation. */
- {
- TCGv d_addr = tcg_temp_new();
- TCGv eight = tcg_constant_tl(8);
- int i;
-
- tcg_gen_andi_tl(d_addr, addr, -8);
- for (i = 0; i < 32; i += 8) {
- tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
- tcg_gen_add_tl(d_addr, d_addr, eight);
- }
- }
- break;
- default:
- {
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(MO_UQ);
-
- save_state(dc);
- gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
- }
- break;
- }
-}
-#endif
-

static TCGv get_src1(DisasContext *dc, unsigned int insn)
{
unsigned int rs1 = GET_FIELD(insn, 13, 17);