diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -174,7 +174,7 @@ struct tb_desc {
target_ulong pc;
target_ulong cs_base;
CPUArchState *env;
- tb_page_addr_t phys_page1;
+ tb_page_addr_t page_addr0;
uint32_t flags;
uint32_t cflags;
uint32_t trace_vcpu_dstate;
@@ -186,7 +186,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
const struct tb_desc *desc = d;

if (tb->pc == desc->pc &&
- tb->page_addr[0] == desc->phys_page1 &&
+ tb->page_addr[0] == desc->page_addr0 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
@@ -195,8 +195,8 @@ static bool tb_lookup_cmp(const void *p, const void *d)
if (tb->page_addr[1] == -1) {
return true;
} else {
- tb_page_addr_t phys_page2;
- target_ulong virt_page2;
+ tb_page_addr_t phys_page1;
+ target_ulong virt_page1;

/*
* We know that the first page matched, and an otherwise valid TB
@@ -207,9 +207,9 @@ static bool tb_lookup_cmp(const void *p, const void *d)
* is different for the new TB. Therefore any exception raised
* here by the faulting lookup is not premature.
*/
- virt_page2 = TARGET_PAGE_ALIGN(desc->pc);
- phys_page2 = get_page_addr_code(desc->env, virt_page2);
- if (tb->page_addr[1] == phys_page2) {
+ virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
+ phys_page1 = get_page_addr_code(desc->env, virt_page1);
+ if (tb->page_addr[1] == phys_page1) {
return true;
}
}
@@ -235,7 +235,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
if (phys_pc == -1) {
return NULL;
}
- desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
+ desc.page_addr0 = phys_pc;
h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -951,7 +951,8 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
- cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
+ cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
+ TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1186,7 +1186,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
qemu_spin_unlock(&tb->jmp_lock);

/* remove the TB from the hash list */
- phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+ phys_pc = tb->page_addr[0];
h = tb_hash_func(phys_pc, tb->pc, tb->flags, orig_cflags,
tb->trace_vcpu_dstate);
if (!qht_remove(&tb_ctx.htable, tb, h)) {
@@ -1342,7 +1342,7 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
* we can only insert TBs that are fully initialized.
*/
page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
- tb_page_add(p, tb, 0, phys_pc & TARGET_PAGE_MASK);
+ tb_page_add(p, tb, 0, phys_pc);
if (p2) {
tb_page_add(p2, tb, 1, phys_page2);
} else {
@@ -1697,11 +1697,12 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (n == 0) {
/* NOTE: tb_end may be after the end of the page, but
it is not a problem */
- tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
+ tb_start = tb->page_addr[0];
tb_end = tb_start + tb->size;
} else {
tb_start = tb->page_addr[1];
- tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
+ tb_end = tb_start + ((tb->page_addr[0] + tb->size)
+ & ~TARGET_PAGE_MASK);
}
if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC

Let tb->page_addr[0] contain the offset within the page of the
start of the translation block.  We need to recover this value
anyway at various points, and it is easier to discard the page
offset when it's not needed, which happens naturally via the
existing find_page shift.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cpu-exec.c      | 16 ++++++++--------
 accel/tcg/cputlb.c        |  3 ++-
 accel/tcg/translate-all.c |  9 +++++----
 3 files changed, 15 insertions(+), 13 deletions(-)
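
As an illustration for review, the following standalone sketch, which is
not QEMU code, shows the representation this patch adopts.  PAGE_BITS,
PAGE_MASK and page_index() are simplified stand-ins for QEMU's
TARGET_PAGE_BITS, TARGET_PAGE_MASK and the find_page shift mentioned
above, and the phys_pc/size values are made up for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                          /* assume 4 KiB pages */
#define PAGE_SIZE ((uintptr_t)1 << PAGE_BITS)
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* The page-table lookup shift discards the low bits by construction,
   so an address that still carries its in-page offset needs no
   explicit masking before being used as an index. */
static uintptr_t page_index(uintptr_t addr)
{
    return addr >> PAGE_BITS;
}

int main(void)
{
    uintptr_t phys_pc = 0x403a20;   /* page base | in-page offset */
    uintptr_t size = 0x700;         /* hypothetical TB size */

    /* Either component is one mask away... */
    uintptr_t page_base = phys_pc & PAGE_MASK;   /* 0x403000 */
    uintptr_t offset    = phys_pc & ~PAGE_MASK;  /* 0xa20 */
    assert((page_base | offset) == phys_pc);

    /* ...and the shift ignores the offset automatically, which is why
       storing the unmasked value costs nothing on the lookup path. */
    assert(page_index(phys_pc) == page_index(page_base));

    /* For a block spilling onto a second page, its extent there is
       (offset + size) mod PAGE_SIZE -- the same expression the
       tb_invalidate_phys_page_range__locked hunk now computes from
       tb->page_addr[0] directly. */
    uintptr_t end_on_page2 = (phys_pc + size) & ~PAGE_MASK;  /* 0x120 */

    printf("page=%#lx offset=%#lx end2=%#lx\n",
           (unsigned long)page_base, (unsigned long)offset,
           (unsigned long)end_on_page2);
    return 0;
}

The same point is visible in the do_tb_phys_invalidate hunk: the hash
already wanted the unmasked address, so the old code had to rebuild it
as tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); keeping the offset
in page_addr[0] makes that reconstruction unnecessary.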