@@ -11,9 +11,10 @@
#include "exec/exec-all.h"
-TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
- target_ulong cs_base, uint32_t flags,
- int cflags);
+TranslationBlock *tb_gen_code(CPUState *cpu,
+ tb_page_addr_t phys_pc, void *host_pc,
+ target_ulong pc, target_ulong cs_base,
+ uint32_t flags, int cflags);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
void page_init(void);
void tb_htable_init(void);
@@ -485,6 +485,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
target_ulong cs_base, pc;
uint32_t flags, cflags;
tb_page_addr_t phys_pc;
+ void *host_pc;
int tb_exit;
if (sigsetjmp(cpu->jmp_env, 0) == 0) {
@@ -507,17 +508,17 @@ void cpu_exec_step_atomic(CPUState *cpu)
* Any breakpoint for this insn will have been recognized earlier.
*/
- phys_pc = get_page_addr_code(env, pc);
+ mmap_lock();
+ phys_pc = get_page_addr_code_hostp(env, pc, true, &host_pc);
if (phys_pc == -1) {
tb = NULL;
} else {
tb = tb_lookup(cpu, phys_pc, pc, cs_base, flags, cflags);
}
if (tb == NULL) {
- mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- mmap_unlock();
+ tb = tb_gen_code(cpu, phys_pc, host_pc, pc, cs_base, flags, cflags);
}
+ mmap_unlock();
cpu_exec_enter(cpu);
/* execute the generated code */
@@ -958,6 +959,7 @@ int cpu_exec(CPUState *cpu)
target_ulong cs_base, pc;
uint32_t flags, cflags;
tb_page_addr_t phys_pc;
+ void *host_pc;
cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
@@ -979,22 +981,24 @@ int cpu_exec(CPUState *cpu)
break;
}
- phys_pc = get_page_addr_code(cpu->env_ptr, pc);
+ mmap_lock();
+ phys_pc = get_page_addr_code_hostp(cpu->env_ptr, pc,
+ true, &host_pc);
if (phys_pc == -1) {
tb = NULL;
} else {
tb = tb_lookup(cpu, phys_pc, pc, cs_base, flags, cflags);
}
if (tb == NULL) {
- mmap_lock();
- tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
- mmap_unlock();
+ tb = tb_gen_code(cpu, phys_pc, host_pc, pc,
+ cs_base, flags, cflags);
/*
* We add the TB in the virtual pc hash table
* for the fast lookup
*/
qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
}
+ mmap_unlock();
#ifndef CONFIG_USER_ONLY
/*
@@ -1326,12 +1326,13 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
/* Called with mmap_lock held for user mode emulation. */
TranslationBlock *tb_gen_code(CPUState *cpu,
+ tb_page_addr_t phys_pc, void *host_pc,
target_ulong pc, target_ulong cs_base,
uint32_t flags, int cflags)
{
CPUArchState *env = cpu->env_ptr;
TranslationBlock *tb, *existing_tb;
- tb_page_addr_t phys_pc, phys_page2;
+ tb_page_addr_t phys_page2;
target_ulong virt_page2;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
@@ -1343,8 +1344,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
assert_memory_lock();
qemu_thread_jit_write();
- phys_pc = get_page_addr_code(env, pc);
-
if (phys_pc == -1) {
/* Generate a one-shot TB with 1 insn in it */
cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
Reuse the result that we just used with tb_lookup.  Pass in host_pc
while touching these lines, to be used shortly.  We must widen the
scope of the mmap_lock, so that the page table lookup that is finally
used is covered by the lock.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h      |  7 ++++---
 accel/tcg/cpu-exec.c      | 20 ++++++++++++--------
 accel/tcg/translate-all.c |  5 ++---
 3 files changed, 18 insertions(+), 14 deletions(-)