@@ -175,6 +175,36 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     return tb;
 }
 
+static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
+                                const TranslationBlock *tb)
+{
+    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
+        && qemu_log_in_addr_range(pc)) {
+
+        qemu_log_mask(CPU_LOG_EXEC,
+                      "Trace %d: %p [" TARGET_FMT_lx
+                      "/" TARGET_FMT_lx "/%#x] %s\n",
+                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc, tb->flags,
+                      lookup_symbol(pc));
+
+#if defined(DEBUG_DISAS)
+        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
+            FILE *logfile = qemu_log_lock();
+            int flags = 0;
+
+            if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
+                flags |= CPU_DUMP_FPU;
+            }
+#if defined(TARGET_I386)
+            flags |= CPU_DUMP_CCOP;
+#endif
+            log_cpu_state(cpu, flags);
+            qemu_log_unlock(logfile);
+        }
+#endif /* DEBUG_DISAS */
+    }
+}
+
 /**
  * helper_lookup_tb_ptr: quick check for next tb
  * @env: current cpu state
@@ -196,11 +226,9 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     if (tb == NULL) {
         return tcg_code_gen_epilogue;
     }
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, pc,
-                           "Chain %d: %p ["
-                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
-                           cpu->cpu_index, tb->tc.ptr, cs_base, pc, flags,
-                           lookup_symbol(pc));
+
+    log_cpu_exec(pc, cpu, tb);
+
     return tb->tc.ptr;
 }
 
@@ -222,28 +250,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
 
-    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
-                           "Trace %d: %p ["
-                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
-                           cpu->cpu_index, itb->tc.ptr,
-                           itb->cs_base, itb->pc, itb->flags,
-                           lookup_symbol(itb->pc));
-
-#if defined(DEBUG_DISAS)
-    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
-        && qemu_log_in_addr_range(itb->pc)) {
-        FILE *logfile = qemu_log_lock();
-        int flags = 0;
-        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
-            flags |= CPU_DUMP_FPU;
-        }
-#if defined(TARGET_I386)
-        flags |= CPU_DUMP_CCOP;
-#endif
-        log_cpu_state(cpu, flags);
-        qemu_log_unlock(logfile);
-    }
-#endif /* DEBUG_DISAS */
+    log_cpu_exec(itb->pc, cpu, itb);
 
     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);

Split out CPU_LOG_EXEC and CPU_LOG_TB_CPU logging from cpu_tb_exec to a
new function.  Perform only one pc range check after a combined mask
check.  Use the new function in lookup_tb_ptr.  This enables
CPU_LOG_TB_CPU between indirectly chained tbs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c | 61 ++++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 27 deletions(-)

-- 
2.25.1
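
For illustration only, the gating pattern the commit message describes
("only one pc range check after a combined mask check") can be sketched
as a standalone C program.  This is a hedged sketch, not QEMU code:
LOG_EXEC, LOG_TB_CPU, log_mask(), in_addr_range() and log_exec() are
hypothetical stand-ins for CPU_LOG_EXEC, CPU_LOG_TB_CPU,
qemu_loglevel_mask(), qemu_log_in_addr_range() and the new
log_cpu_exec() respectively.

/*
 * Illustrative sketch -- not part of the patch, not QEMU's API.
 * It mirrors the structure of log_cpu_exec(): one combined mask test,
 * then a single pc range test, then per-category output inside the
 * guarded block.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define LOG_EXEC   (1 << 0)   /* plays the role of CPU_LOG_EXEC   */
#define LOG_TB_CPU (1 << 1)   /* plays the role of CPU_LOG_TB_CPU */

static int loglevel = LOG_EXEC | LOG_TB_CPU;   /* enabled categories */

static bool log_mask(int mask)
{
    return (loglevel & mask) != 0;
}

static bool in_addr_range(uint64_t pc)
{
    /* stand-in for the pc filter; here: only "user" addresses pass */
    return pc >= 0x1000;
}

static void log_exec(uint64_t pc)
{
    /* One combined mask check and one range check guard both outputs. */
    if (log_mask(LOG_TB_CPU | LOG_EXEC) && in_addr_range(pc)) {
        if (log_mask(LOG_EXEC)) {
            printf("Trace: pc=0x%" PRIx64 "\n", pc);
        }
        if (log_mask(LOG_TB_CPU)) {
            printf("CPU state dump at pc=0x%" PRIx64 "\n", pc);
        }
    }
}

int main(void)
{
    log_exec(0x400080);   /* both messages printed */
    log_exec(0x0080);     /* filtered out by the range check */
    return 0;
}

The practical effect of this ordering, as the commit message indicates,
is that the address-range filter is evaluated at most once per executed
block even when both log categories are enabled.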