@@ -519,10 +519,12 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
}

/* current cflags for hashing/comparison */
-static inline uint32_t curr_cflags(void)
+static inline uint32_t curr_cflags(CPUState *cpu)
{
- return (parallel_cpus ? CF_PARALLEL : 0)
- | (icount_enabled() ? CF_USE_ICOUNT : 0);
+ uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, cpu->cluster_index);
+ cflags |= parallel_cpus ? CF_PARALLEL : 0;
+ cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+ return cflags;
}

/* TranslationBlock invalidate API */
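The net effect of this hunk is that every caller of curr_cflags() now gets the CPU's cluster index pre-folded into the top byte of cflags. A minimal standalone sketch of the packing, assuming deposit32() behaves like the helper in include/qemu/bitops.h and CF_CLUSTER_SHIFT is 24 (the top 8 bits hold the cluster ID):

    #include <stdint.h>
    #include <stdio.h>

    #define CF_CLUSTER_SHIFT 24   /* assumed: top 8 bits hold the cluster ID */

    /* local re-implementation of QEMU's deposit32(), for illustration */
    static uint32_t deposit32(uint32_t value, int start, int length,
                              uint32_t fieldval)
    {
        uint32_t mask = (~0U >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    int main(void)
    {
        /* a CPU in cluster 3: the index lands in bits 24..31, leaving the
         * low bits free for CF_PARALLEL, CF_USE_ICOUNT, the insn count... */
        uint32_t cflags = deposit32(0, CF_CLUSTER_SHIFT, 8, 3);
        printf("cflags = 0x%08x\n", cflags);   /* prints 0x03000000 */
        return 0;
    }

Hashing the cluster ID into cflags is what keeps TBs from one cluster invisible to lookups from another, so it belongs in this one central place rather than in each lookup path.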
@@ -27,9 +27,6 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
hash = tb_jmp_cache_hash_func(pc);
tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);

- cf_mask &= ~CF_CLUSTER_MASK;
- cf_mask |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
if (likely(tb &&
tb->pc == pc &&
tb->cs_base == cs_base &&
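With the cluster bits already supplied by curr_cflags(cpu) at the call sites, the fixup deleted here had become a no-op. A standalone check of that equivalence, assuming the usual 8-bit cluster field at bit 24 (CF_CLUSTER_MASK == 0xff000000):

    #include <assert.h>
    #include <stdint.h>

    #define CF_CLUSTER_SHIFT 24           /* assumed values */
    #define CF_CLUSTER_MASK  0xff000000u

    int main(void)
    {
        uint32_t cluster_index = 5;
        /* what the new curr_cflags(cpu) already hands to tb_lookup() */
        uint32_t cf_mask = cluster_index << CF_CLUSTER_SHIFT;
        /* the fixup the hunk above deletes */
        uint32_t fixed = (cf_mask & ~CF_CLUSTER_MASK)
                       | (cluster_index << CF_CLUSTER_SHIFT);
        assert(fixed == cf_mask);   /* redundant, hence the deletion */
        return 0;
    }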
@@ -249,8 +249,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
TranslationBlock *tb;
target_ulong cs_base, pc;
uint32_t flags;
- uint32_t cflags = 1;
- uint32_t cf_mask = cflags & CF_HASH_MASK;
+ uint32_t cflags = (curr_cflags(cpu) & ~CF_PARALLEL) | 1;
int tb_exit;

if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -260,7 +259,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
cpu->running = true;

cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
- tb = tb_lookup(cpu, pc, cs_base, flags, cf_mask);
+ tb = tb_lookup(cpu, pc, cs_base, flags, cflags);

if (tb == NULL) {
mmap_lock();
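The rewritten cpu_exec_step_atomic() derives its cflags from curr_cflags(cpu) instead of a bare 1, so the cluster bits survive; it then clears CF_PARALLEL (this code runs under start_exclusive(), so there is no concurrent store to guard against) and sets the low count field to one instruction. A standalone sketch with assumed bit values (the real constants live in include/exec/exec-all.h):

    #include <stdint.h>
    #include <stdio.h>

    #define CF_PARALLEL      0x00080000u  /* assumed value, illustration only */
    #define CF_CLUSTER_SHIFT 24

    int main(void)
    {
        /* pretend curr_cflags(cpu): cluster 2, parallel mode on */
        uint32_t base = (2u << CF_CLUSTER_SHIFT) | CF_PARALLEL;
        uint32_t cflags = (base & ~CF_PARALLEL) | 1;
        /* cluster bits kept, CF_PARALLEL dropped, count forced to 1 */
        printf("0x%08x -> 0x%08x\n", base, cflags); /* 0x02080000 -> 0x02000001 */
        return 0;
    }

This also removes the old mismatch where the function looked up with one value (cf_mask) but would generate code with another (cflags); now a single value is used for both.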
@@ -497,7 +496,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
/* Execute just one insn to trigger exception pending in the log */
- cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
+ cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT) | 1;
}
#endif
return false;
@@ -794,7 +793,7 @@ int cpu_exec(CPUState *cpu)
have CF_INVALID set, -1 is a convenient invalid value that
does not require tcg headers for cpu_common_reset. */
if (cflags == -1) {
- cflags = curr_cflags();
+ cflags = curr_cflags(cpu);
} else {
cpu->cflags_next_tb = -1;
}
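For context, the surrounding logic is the one-shot handshake the comment describes: cflags_next_tb is parked at -1 (no pending request), and any forced-recompile path deposits a value that cpu_exec() consumes exactly once. A minimal sketch of that pattern with a stand-in struct (names here are illustrative, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_cpu {
        uint32_t cflags_next_tb;   /* (uint32_t)-1 means nothing pending */
        uint32_t base_cflags;      /* stand-in for curr_cflags(cpu) */
    };

    static uint32_t next_cflags(struct toy_cpu *cpu)
    {
        uint32_t cflags = cpu->cflags_next_tb;
        if (cflags == (uint32_t)-1) {
            return cpu->base_cflags;        /* normal case */
        }
        cpu->cflags_next_tb = (uint32_t)-1; /* consume the one-shot value */
        return cflags;
    }

    int main(void)
    {
        struct toy_cpu cpu = { (uint32_t)-1, 0x01000000u };
        printf("0x%08x\n", next_cflags(&cpu));   /* base: 0x01000000 */
        cpu.cflags_next_tb = 0x01000001u;        /* a "one insn" request */
        printf("0x%08x\n", next_cflags(&cpu));   /* pended: 0x01000001 */
        printf("0x%08x\n", next_cflags(&cpu));   /* back to base */
        return 0;
    }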
@@ -154,7 +154,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)

cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

- tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags());
+ tb = tb_lookup(cpu, pc, cs_base, flags, curr_cflags(cpu));
if (tb == NULL) {
return tcg_code_gen_epilogue;
}
@@ -2194,7 +2194,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -2362,7 +2362,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
return true;
}
#endif
@@ -2438,7 +2438,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't
* double instrument the instruction.
*/
- cpu->cflags_next_tb = curr_cflags() | CF_MEMI_ONLY | CF_LAST_IO | n;
+ cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;

qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"cpu_io_recompile: rewound execution of TB to "
@@ -937,7 +937,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags();
+ cpu->cflags_next_tb = 1 | curr_cflags(cpu);
mmap_unlock();
if (ra) {
cpu_restore_state(cpu, ra, true);