@@ -818,6 +818,4 @@ DEF_HELPER_FLAGS_5(stq_le_parallel, TCG_CALL_NO_WG,
                    void, env, tl, i64, i64, i32)
 DEF_HELPER_FLAGS_5(stq_be_parallel, TCG_CALL_NO_WG,
                    void, env, tl, i64, i64, i32)
-DEF_HELPER_5(stqcx_le_parallel, i32, env, tl, i64, i64, i32)
-DEF_HELPER_5(stqcx_be_parallel, i32, env, tl, i64, i64, i32)
 #endif
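
For reference, each DEF_HELPER_5 line removed here declares a C prototype of the following shape; it matches the helper definitions deleted from mem_helper.c in the next hunk:

    uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
                                      uint64_t new_lo, uint64_t new_hi,
                                      uint32_t opidx);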
@@ -413,50 +413,6 @@ void helper_stq_be_parallel(CPUPPCState *env, target_ulong addr,
     val = int128_make128(lo, hi);
     cpu_atomic_sto_be_mmu(env, addr, val, opidx, GETPC());
 }
-
-uint32_t helper_stqcx_le_parallel(CPUPPCState *env, target_ulong addr,
-                                  uint64_t new_lo, uint64_t new_hi,
-                                  uint32_t opidx)
-{
-    bool success = false;
-
-    /* We will have raised EXCP_ATOMIC from the translator. */
-    assert(HAVE_CMPXCHG128);
-
-    if (likely(addr == env->reserve_addr)) {
-        Int128 oldv, cmpv, newv;
-
-        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
-        newv = int128_make128(new_lo, new_hi);
-        oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv,
-                                          opidx, GETPC());
-        success = int128_eq(oldv, cmpv);
-    }
-    env->reserve_addr = -1;
-    return env->so + success * CRF_EQ;
-}
-
-uint32_t helper_stqcx_be_parallel(CPUPPCState *env, target_ulong addr,
-                                  uint64_t new_lo, uint64_t new_hi,
-                                  uint32_t opidx)
-{
-    bool success = false;
-
-    /* We will have raised EXCP_ATOMIC from the translator. */
-    assert(HAVE_CMPXCHG128);
-
-    if (likely(addr == env->reserve_addr)) {
-        Int128 oldv, cmpv, newv;
-
-        cmpv = int128_make128(env->reserve_val2, env->reserve_val);
-        newv = int128_make128(new_lo, new_hi);
-        oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv,
-                                          opidx, GETPC());
-        success = int128_eq(oldv, cmpv);
-    }
-    env->reserve_addr = -1;
-    return env->so + success * CRF_EQ;
-}
 #endif
 
 /*****************************************************************************/
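
The deleted helpers returned the new CR0 contents: EQ set iff the 16-byte compare-and-swap succeeded, plus a copy of XER[SO]. A minimal standalone sketch of that packing, assuming the CRF_* encoding from target/ppc/cpu.h (SO in bit 0, EQ in bit 1 of the 4-bit CR field):

    /* Sketch only; the CRF_* values mirror target/ppc/cpu.h. */
    #include <stdbool.h>
    #include <stdint.h>

    #define CRF_EQ_BIT 1
    #define CRF_EQ     (1 << CRF_EQ_BIT)

    static uint32_t stqcx_cr0(bool success, uint32_t so)
    {
        /* env->so is 0 or 1, so the addition cannot carry into EQ:
           so + success * CRF_EQ == so | (success ? CRF_EQ : 0). */
        return so + success * CRF_EQ;
    }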
@@ -72,6 +72,7 @@ static TCGv cpu_cfar;
 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
 static TCGv cpu_reserve;
 static TCGv cpu_reserve_val;
+static TCGv cpu_reserve_val2;
 static TCGv cpu_fpscr;
 static TCGv_i32 cpu_access_type;
 
@@ -141,8 +142,11 @@ void ppc_translate_init(void)
                                      offsetof(CPUPPCState, reserve_addr),
                                      "reserve_addr");
     cpu_reserve_val = tcg_global_mem_new(cpu_env,
-                                     offsetof(CPUPPCState, reserve_val),
-                                     "reserve_val");
+                                         offsetof(CPUPPCState, reserve_val),
+                                         "reserve_val");
+    cpu_reserve_val2 = tcg_global_mem_new(cpu_env,
+                                          offsetof(CPUPPCState, reserve_val2),
+                                          "reserve_val2");
     cpu_fpscr = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUPPCState, fpscr), "fpscr");
 
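
cpu_reserve_val2 exposes to TCG the second half of the 128-bit reservation data captured by lqarx. The backing CPUPPCState fields (already present before this patch) are, roughly:

    /* From target/ppc/cpu.h; comments paraphrased. */
    target_ulong reserve_addr;  /* reservation address */
    target_ulong reserve_val;   /* reservation value (high doubleword) */
    target_ulong reserve_val2;  /* reservation value (low doubleword) */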
@@ -3998,78 +4002,66 @@ static void gen_lqarx(DisasContext *ctx)
 /* stqcx. */
 static void gen_stqcx_(DisasContext *ctx)
 {
+    TCGLabel *lab_fail, *lab_over;
     int rs = rS(ctx->opcode);
-    TCGv EA, hi, lo;
+    TCGv EA, t0, t1;
+    TCGv_i128 cmp, val;
 
     if (unlikely(rs & 1)) {
         gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
         return;
     }
 
+    lab_fail = gen_new_label();
+    lab_over = gen_new_label();
+
     gen_set_access_type(ctx, ACCESS_RES);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
 
+    tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
+    tcg_temp_free(EA);
+
+    cmp = tcg_temp_new_i128();
+    val = tcg_temp_new_i128();
+
+    tcg_gen_concat_i64_i128(cmp, cpu_reserve_val2, cpu_reserve_val);
+
     /* Note that the low part is always in RS+1, even in LE mode. */
-    lo = cpu_gpr[rs + 1];
-    hi = cpu_gpr[rs];
+    tcg_gen_concat_i64_i128(val, cpu_gpr[rs + 1], cpu_gpr[rs]);
 
-    if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
-        if (HAVE_CMPXCHG128) {
-            TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN);
-            if (ctx->le_mode) {
-                gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env,
-                                             EA, lo, hi, oi);
-            } else {
-                gen_helper_stqcx_be_parallel(cpu_crf[0], cpu_env,
-                                             EA, lo, hi, oi);
-            }
-            tcg_temp_free_i32(oi);
-        } else {
-            /* Restart with exclusive lock. */
-            gen_helper_exit_atomic(cpu_env);
-            ctx->base.is_jmp = DISAS_NORETURN;
-        }
-        tcg_temp_free(EA);
-    } else {
-        TCGLabel *lab_fail = gen_new_label();
-        TCGLabel *lab_over = gen_new_label();
-        TCGv_i64 t0 = tcg_temp_new_i64();
-        TCGv_i64 t1 = tcg_temp_new_i64();
+    tcg_gen_atomic_cmpxchg_i128(val, cpu_reserve, cmp, val, ctx->mem_idx,
+                                DEF_MEMOP(MO_128 | MO_ALIGN));
+    tcg_temp_free_i128(cmp);
 
-        tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, lab_fail);
-        tcg_temp_free(EA);
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();
+    tcg_gen_extr_i128_i64(t1, t0, val);
+    tcg_temp_free_i128(val);
 
-        gen_qemu_ld64_i64(ctx, t0, cpu_reserve);
-        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
-                                     ? offsetof(CPUPPCState, reserve_val2)
-                                     : offsetof(CPUPPCState, reserve_val)));
-        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+    tcg_gen_xor_tl(t1, t1, cpu_reserve_val2);
+    tcg_gen_xor_tl(t0, t0, cpu_reserve_val);
+    tcg_gen_or_tl(t0, t0, t1);
+    tcg_temp_free(t1);
 
-        tcg_gen_addi_i64(t0, cpu_reserve, 8);
-        gen_qemu_ld64_i64(ctx, t0, t0);
-        tcg_gen_ld_i64(t1, cpu_env, (ctx->le_mode
-                                     ? offsetof(CPUPPCState, reserve_val)
-                                     : offsetof(CPUPPCState, reserve_val2)));
-        tcg_gen_brcond_i64(TCG_COND_NE, t0, t1, lab_fail);
+    tcg_gen_setcondi_tl(TCG_COND_EQ, t0, t0, 0);
+    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
+    tcg_gen_or_tl(t0, t0, cpu_so);
+    tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
+    tcg_temp_free(t0);
 
-        /* Success */
-        gen_qemu_st64_i64(ctx, ctx->le_mode ? lo : hi, cpu_reserve);
-        tcg_gen_addi_i64(t0, cpu_reserve, 8);
-        gen_qemu_st64_i64(ctx, ctx->le_mode ? hi : lo, t0);
+    tcg_gen_br(lab_over);
+    gen_set_label(lab_fail);
 
-        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
-        tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
-        tcg_gen_br(lab_over);
+    /*
+     * Address mismatch implies failure. But we still need to provide
+     * the memory barrier semantics of the instruction.
+     */
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
 
-        gen_set_label(lab_fail);
-        tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
-
-        gen_set_label(lab_over);
-        tcg_gen_movi_tl(cpu_reserve, -1);
-        tcg_temp_free_i64(t0);
-        tcg_temp_free_i64(t1);
-    }
+    gen_set_label(lab_over);
+    tcg_gen_movi_tl(cpu_reserve, -1);
 }
 
 #endif /* defined(TARGET_PPC64) */
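
Taken together, the new translation of stqcx. emits the equivalent of the C sketch below. The names stqcx_sketch, atomic_cmpxchg_16 and barrier are hypothetical stand-ins for the TCG ops above, not real QEMU APIs:

    /* Hypothetical sketch of the emitted logic; not actual QEMU code. */
    static uint32_t stqcx_sketch(CPUPPCState *env, uint64_t ea,
                                 uint64_t rs_hi, uint64_t rs_lo)
    {
        bool success = false;

        if (ea == env->reserve_addr) {
            Int128 cmp = int128_make128(env->reserve_val2, env->reserve_val);
            Int128 val = int128_make128(rs_lo, rs_hi);
            /* tcg_gen_atomic_cmpxchg_i128: one aligned 16-byte CAS. */
            Int128 old = atomic_cmpxchg_16(ea, cmp, val);
            success = int128_eq(old, cmp);
        } else {
            /* Fail, but keep the instruction's barrier semantics. */
            barrier();  /* tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL) */
        }
        env->reserve_addr = -1;
        return env->so | (success ? CRF_EQ : 0);
    }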