@@ -2263,35 +2263,40 @@ static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
}
}
-static void __attribute__((unused))
-gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+static void gen_ldf_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
TCGv_i64 d64;
- switch (da.type) {
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
+
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_dest_fpr_F(dc);
- tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+
+ case MO_64:
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx, memop);
break;
- case 16:
+
+ case MO_128:
d64 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+ tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
@@ -2301,24 +2306,19 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
case GET_ASI_BLOCK:
/* Valid for lddfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
+ if (size == MO_64 && (rd & 7) == 0) {
TCGv eight;
int i;
- gen_address_mask(dc, addr);
-
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
eight = tcg_constant_tl(8);
for (i = 0; ; ++i) {
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
}
} else {
gen_exception(dc, TT_ILL_INSN);
@@ -2327,10 +2327,9 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
case GET_ASI_SHORT:
/* Valid for lddfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (size == MO_64) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -2338,8 +2337,8 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
default:
{
- TCGv_i32 r_asi = tcg_constant_i32(da.asi);
- TCGv_i32 r_mop = tcg_constant_i32(da.memop | MO_ALIGN);
+ TCGv_i32 r_asi = tcg_constant_i32(da->asi);
+ TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);
save_state(dc);
/* According to the table in the UA2011 manual, the only
@@ -2347,21 +2346,23 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
the NO_FAULT asis. We still need a helper for these,
but we can just use the integer asi helper for them. */
switch (size) {
- case 4:
+ case MO_32:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
d32 = gen_dest_fpr_F(dc);
tcg_gen_extrl_i64_i32(d32, d64);
gen_store_fpr_F(dc, rd, d32);
break;
- case 8:
- gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr, r_asi, r_mop);
+ case MO_64:
+ gen_helper_ld_asi(cpu_fpr[rd / 2], tcg_env, addr,
+ r_asi, r_mop);
break;
- case 16:
+ case MO_128:
d64 = tcg_temp_new_i64();
gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
tcg_gen_addi_tl(addr, addr, 8);
- gen_helper_ld_asi(cpu_fpr[rd/2+1], tcg_env, addr, r_asi, r_mop);
+ gen_helper_ld_asi(cpu_fpr[rd / 2 + 1], tcg_env, addr,
+ r_asi, r_mop);
tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
break;
default:
@@ -2373,36 +2374,51 @@ gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
}
static void __attribute__((unused))
-gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+gen_ldf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL :
+ size == 8 ? MO_TEUQ : MO_TE | MO_128));
+
+ gen_address_mask(dc, addr);
+ gen_ldf_asi0(dc, &da, addr, rd);
+}
+
+static void gen_stf_asi0(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
+{
+ MemOp memop = da->memop;
+ MemOp size = memop & MO_SIZE;
TCGv_i32 d32;
- switch (da.type) {
+ /* TODO: Use 128-bit load/store below. */
+ if (size == MO_128) {
+ memop = (memop & ~MO_SIZE) | MO_64;
+ }
+
+ switch (da->type) {
case GET_ASI_EXCP:
break;
case GET_ASI_DIRECT:
- gen_address_mask(dc, addr);
+ memop |= MO_ALIGN_4;
switch (size) {
- case 4:
+ case MO_32:
d32 = gen_load_fpr_F(dc, rd);
- tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop | MO_ALIGN);
+ tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
break;
- case 8:
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_4);
+ case MO_64:
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_4);
break;
- case 16:
+ case MO_128:
/* Only 4-byte alignment required. However, it is legal for the
cpu to signal the alignment fault, and the OS trap handler is
required to fix it up. Requiring 16-byte alignment here avoids
having to probe the second page before performing the first
write. */
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN_16);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN_16);
tcg_gen_addi_tl(addr, addr, 8);
- tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + 1], addr, da->mem_idx, memop);
break;
default:
g_assert_not_reached();
@@ -2411,24 +2427,19 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
case GET_ASI_BLOCK:
/* Valid for stdfa on aligned registers only. */
- if (size == 8 && (rd & 7) == 0) {
- MemOp memop;
+ if (size == MO_64 && (rd & 7) == 0) {
TCGv eight;
int i;
- gen_address_mask(dc, addr);
-
/* The first operation checks required alignment. */
- memop = da.memop | MO_ALIGN_64;
eight = tcg_constant_tl(8);
for (i = 0; ; ++i) {
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
- da.mem_idx, memop);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr, da->mem_idx,
+ memop | (i == 0 ? MO_ALIGN_64 : 0));
if (i == 7) {
break;
}
tcg_gen_add_tl(addr, addr, eight);
- memop = da.memop;
}
} else {
gen_exception(dc, TT_ILL_INSN);
@@ -2437,10 +2448,9 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
case GET_ASI_SHORT:
/* Valid for stdfa only. */
- if (size == 8) {
- gen_address_mask(dc, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
- da.memop | MO_ALIGN);
+ if (size == MO_64) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da->mem_idx,
+ memop | MO_ALIGN);
} else {
gen_exception(dc, TT_ILL_INSN);
}
@@ -2455,6 +2465,16 @@ gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
}
}
+static void __attribute__((unused))
+gen_stf_asi(DisasContext *dc, TCGv addr, int insn, int size, int rd)
+{
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL :
+ size == 8 ? MO_TEUQ : MO_TE | MO_128));
+
+ gen_address_mask(dc, addr);
+ gen_stf_asi0(dc, &da, addr, rd);
+}
+
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
TCGv hi = gen_dest_gpr(dc, rd);
Take the operation size from the MemOp instead of a separate parameter.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/sparc/translate.c | 136 ++++++++++++++++++++++-----------------
 1 file changed, 78 insertions(+), 58 deletions(-)
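For context, the core of the refactor is that callers now encode the access width inside the MemOp carried by DisasASI, and the split-out helpers recover it with `memop & MO_SIZE` instead of taking a separate byte-size argument. The standalone sketch below illustrates that pattern only; the constants mirror QEMU's MemOp size encoding (log2 byte sizes, MO_SIZE as the mask) but are redefined locally so the snippet compiles without the QEMU headers, and the helper names are purely illustrative.

```c
#include <stdio.h>

/* Local stand-ins for QEMU's MemOp size field (log2 of the byte size). */
typedef enum {
    MO_8    = 0,
    MO_16   = 1,
    MO_32   = 2,
    MO_64   = 3,
    MO_128  = 4,
    MO_SIZE = 7,          /* mask covering the size bits */
} MemOp;

/* Old style: the access width is a separate parameter, in bytes. */
static void ld_old(int size_in_bytes)
{
    printf("old: %d-byte access\n", size_in_bytes);
}

/* New style: the access width is recovered from the MemOp itself. */
static void ld_new(MemOp memop)
{
    MemOp size = memop & MO_SIZE;

    /* As in the patch, a 128-bit access is (for now) emitted as two
       64-bit operations, so the working memop is downgraded to MO_64. */
    if (size == MO_128) {
        memop = (MemOp)((memop & ~MO_SIZE) | MO_64);
    }
    printf("new: %d-byte access, working size field %d\n",
           1 << size, memop & MO_SIZE);
}

int main(void)
{
    ld_old(16);        /* caller must pass 4, 8 or 16 explicitly */
    ld_new(MO_128);    /* size travels inside the MemOp */
    return 0;
}
```

The practical benefit, visible in the diff, is that gen_ldf_asi0/gen_stf_asi0 switch on `MO_32`/`MO_64`/`MO_128` derived from the one MemOp the caller already built, so the size and the endianness/alignment flags can no longer drift apart.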