@@ -1083,35 +1083,33 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
PageDesc *p, tb_page_addr_t start,
- tb_page_addr_t end,
+ tb_page_addr_t last,
uintptr_t retaddr)
{
TranslationBlock *tb;
- tb_page_addr_t tb_start, tb_end;
PageForEachNext n;
#ifdef TARGET_HAS_PRECISE_SMC
bool current_tb_modified = false;
TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
#endif /* TARGET_HAS_PRECISE_SMC */
- tb_page_addr_t last G_GNUC_UNUSED = end - 1;

/*
- * We remove all the TBs in the range [start, end[.
+ * We remove all the TBs in the range [start, last].
* XXX: see if in some cases it could be faster to invalidate all the code
*/
PAGE_FOR_EACH_TB(start, last, p, tb, n) {
+ tb_page_addr_t tb_start, tb_last;
+
/* NOTE: this is subtle as a TB may span two physical pages */
+ tb_start = tb_page_addr0(tb);
+ tb_last = tb_start + tb->size - 1;
if (n == 0) {
- /* NOTE: tb_end may be after the end of the page, but
- it is not a problem */
- tb_start = tb_page_addr0(tb);
- tb_end = tb_start + tb->size;
+ tb_last = MIN(tb_last, tb_start | ~TARGET_PAGE_MASK);
} else {
tb_start = tb_page_addr1(tb);
- tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
- & ~TARGET_PAGE_MASK);
+ tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
}
- if (!(tb_end <= start || tb_start >= end)) {
+ if (!(tb_last < start || tb_start > last)) {
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb == tb &&
(tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
@@ -1164,7 +1162,7 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
start = addr & TARGET_PAGE_MASK;
last = addr | ~TARGET_PAGE_MASK;
pages = page_collection_lock(start, last);
- tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
+ tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
page_collection_unlock(pages);
}

@@ -1191,7 +1189,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
continue;
}
assert_page_locked(pd);
- tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+ tb_invalidate_phys_page_range__locked(pages, pd, start, bound - 1, 0);
}
page_collection_unlock(pages);
}
@@ -1211,7 +1209,7 @@ static void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
}

assert_page_locked(p);
- tb_invalidate_phys_page_range__locked(pages, p, start, start + len, ra);
+ tb_invalidate_phys_page_range__locked(pages, p, start, start + len - 1, ra);
}

/*
Pass the address of the last byte to be changed, rather than the
first address past the last byte. This avoids overflow when the
last page of the address space is involved.

Properly truncate tb_last to the end of the page; the comment about
tb_end being past the end of the page being ok is not correct,
considering overflow.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-maint.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
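As a concrete illustration of the overflow (this sketch is not part of the
patch or of QEMU; addr_t and PAGE_MASK are illustrative stand-ins for
tb_page_addr_t and TARGET_PAGE_MASK, with a 32-bit address space assumed),
the old exclusive-end overlap test goes wrong on the last page of the
address space because "end" wraps around to zero, while the inclusive-last
test from this patch does not:

/* overflow-demo.c: why [start, last] beats [start, end) at the very
 * top of the address space.  Standalone sketch, not QEMU code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t addr_t;                 /* pretend addresses are 32-bit */
#define PAGE_MASK ((addr_t)0xfffff000)   /* 4 KiB pages, for illustration */

/* Exclusive-end overlap test, as before this patch. */
static int overlaps_end(addr_t tb_start, addr_t tb_end,
                        addr_t start, addr_t end)
{
    return !(tb_end <= start || tb_start >= end);
}

/* Inclusive-last overlap test, as after this patch. */
static int overlaps_last(addr_t tb_start, addr_t tb_last,
                         addr_t start, addr_t last)
{
    return !(tb_last < start || tb_start > last);
}

int main(void)
{
    /* Invalidate the whole last page of the address space. */
    addr_t start = (addr_t)-1 & PAGE_MASK;   /* 0xfffff000 */
    addr_t last  = start | ~PAGE_MASK;       /* 0xffffffff: no overflow */
    addr_t end   = start + 0x1000;           /* wraps around to 0 */

    /* A TB occupying the first 8 bytes of that page. */
    addr_t tb_start = start;
    addr_t tb_last  = tb_start + 8 - 1;
    addr_t tb_end   = tb_start + 8;

    /* Prints 0: tb_start >= end (== 0), so the TB is wrongly skipped. */
    printf("exclusive: %d\n", overlaps_end(tb_start, tb_end, start, end));
    /* Prints 1: neither inclusive bound wrapped. */
    printf("inclusive: %d\n", overlaps_last(tb_start, tb_last, start, last));
    return 0;
}

The same wraparound affects tb_end itself when a TB's last byte is the last
byte of the address space, which is why the old comment claiming "tb_end may
be after the end of the page, but it is not a problem" does not hold, and
why the new code computes tb_last inclusively and truncates it with MIN()
against the end of the page.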