@@ -656,6 +656,7 @@ struct TCGContext {
uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_arg if !direct_jump */

TCGRegSet reserved_regs;
+ uint32_t tb_cflags; /* cflags of the current TB */
intptr_t current_frame_offset;
intptr_t frame_start;
intptr_t frame_end;
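
The TCGContext hunk above caches the compile flags of the TB currently being translated, so code-generation helpers can test CF_PARALLEL at translation time instead of reading the mutable global parallel_cpus. A minimal standalone sketch of the pattern follows; the struct and function names are illustrative, and the CF_PARALLEL value is an assumption (the real definition lives in include/exec/exec-all.h):

    /* sketch.c -- illustrative only, not QEMU code */
    #include <stdint.h>
    #include <stdbool.h>

    #define CF_PARALLEL 0x80000u        /* assumed bit position */

    struct translator_state {
        uint32_t tb_cflags;             /* cflags of the current TB */
    };

    static bool gen_for_parallel_context(const struct translator_state *s)
    {
        /* The per-TB flag replaces a read of a global that could
         * change between translation and execution. */
        return (s->tb_cflags & CF_PARALLEL) != 0;
    }
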
@@ -1296,6 +1296,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->flags = flags;
tb->cflags = cflags;
tb->trace_vcpu_dstate = *cpu->trace_dstate;
+ tcg_ctx.tb_cflags = cflags;

#ifdef CONFIG_PROFILER
tcg_ctx.tb_count1++; /* includes aborted translations because of
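
tb_gen_code() publishes the TB's cflags into tcg_ctx before translation begins, so every tcg_gen_* helper invoked while the TB is being built observes the flags of that TB rather than a snapshot of the global. A hedged sketch of the ordering that matters here (gen_intermediate_code() is the front-end entry point called later in this same function):

    tb->cflags = cflags;
    tcg_ctx.tb_cflags = cflags;      /* publish first ... */
    gen_intermediate_code(cpu, tb);  /* ... then translate, so helpers
                                        test the right TB's flags */
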
@@ -121,7 +121,7 @@ void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3,

void tcg_gen_mb(TCGBar mb_type)
{
- if (parallel_cpus) {
+ if (tcg_ctx.tb_cflags & CF_PARALLEL) {
tcg_gen_op1(INDEX_op_mb, mb_type);
}
}
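
With this change a memory-barrier op is only emitted into TBs translated for a parallel context; in single-threaded TCG the fence would be dead weight, since no other vCPU can observe the ordering. A hypothetical front-end call site, using the existing TCG_MO_ALL and TCG_BAR_SC constants, is unchanged by the patch:

    /* Guest fence instruction: request a full sequentially consistent
     * barrier; tcg_gen_mb() now drops it unless CF_PARALLEL is set. */
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
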
@@ -2780,7 +2780,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
{
memop = tcg_canonicalize_memop(memop, 0, 0);

- if (!parallel_cpus) {
+ if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_temp_new_i32();
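
When the TB is not built for a parallel context, the compare-and-swap can be emitted as ordinary loads and stores. A hedged C model of what the non-parallel path computes, written over a host pointer rather than guest memory:

    #include <stdint.h>

    /* Model of the non-atomic cmpxchg fallback: plain load, compare,
     * conditional store; retv receives the original value. */
    static uint32_t cmpxchg32_nonatomic(uint32_t *p, uint32_t cmp,
                                        uint32_t newv)
    {
        uint32_t old = *p;      /* no racing vCPU, a plain load is fine */
        if (old == cmp) {
            *p = newv;          /* plain store, no atomicity required */
        }
        return old;
    }
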
@@ -2824,7 +2824,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
{
memop = tcg_canonicalize_memop(memop, 1, 0);

- if (!parallel_cpus) {
+ if (!(tcg_ctx.tb_cflags & CF_PARALLEL)) {
TCGv_i64 t1 = tcg_temp_new_i64();
TCGv_i64 t2 = tcg_temp_new_i64();
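
The 64-bit variant is converted identically: only the predicate changes, from the global parallel_cpus to the per-TB CF_PARALLEL bit; the emitted non-atomic sequence itself is untouched.
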
@@ -3001,7 +3001,7 @@ static void * const table_##NAME[16] = { \
void tcg_gen_atomic_##NAME##_i32 \
(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (parallel_cpus) { \
+ if (tcg_ctx.tb_cflags & CF_PARALLEL) { \
do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \
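
The GEN_ATOMIC_HELPER expansions (fetch_add, xchg, and friends; the operation list lives in the surrounding file) make the same decision: dispatch through table_##NAME to an out-of-line atomic helper for parallel TBs, or emit the read-modify-write inline otherwise. A hedged model of one expansion's non-parallel semantics, using fetch_add as the example operation (the NEW macro argument selects whether the old or the new value is returned):

    #include <stdint.h>

    /* What the inline (non-parallel) path computes for fetch_add. */
    static uint32_t fetch_add32_nonatomic(uint32_t *p, uint32_t v)
    {
        uint32_t old = *p;
        *p = old + v;
        return old;             /* fetch_add returns the old value */
    }
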
@@ -3011,7 +3011,7 @@ void tcg_gen_atomic_##NAME##_i32 \
void tcg_gen_atomic_##NAME##_i64 \
(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, TCGMemOp memop) \
{ \
- if (parallel_cpus) { \
+ if (tcg_ctx.tb_cflags & CF_PARALLEL) { \
do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \
} else { \
do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \
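
Taken together, these hunks make "generate code for a parallel context" a property of the TB itself, captured in its cflags at translation time, rather than a read of the global parallel_cpus whose value can change between translation and execution. That decoupling is what allows TBs compiled for serial and for parallel execution to coexist and be distinguished by their cflags.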