@@ -509,20 +509,20 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
}
/*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
* intersecting TBs.
* Locking order: acquire locks in ascending order of page index.
*/
static struct page_collection *page_collection_lock(tb_page_addr_t start,
- tb_page_addr_t end)
+ tb_page_addr_t last)
{
struct page_collection *set = g_malloc(sizeof(*set));
tb_page_addr_t index;
PageDesc *pd;
start >>= TARGET_PAGE_BITS;
- end >>= TARGET_PAGE_BITS;
- g_assert(start <= end);
+ last >>= TARGET_PAGE_BITS;
+ g_assert(start <= last);
set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
@@ -532,7 +532,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
retry:
g_tree_foreach(set->tree, page_entry_lock, NULL);
- for (index = start; index <= end; index++) {
+ for (index = start; index <= last; index++) {
TranslationBlock *tb;
PageForEachNext n;
@@ -1152,7 +1152,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
struct page_collection *pages;
- tb_page_addr_t start, end;
+ tb_page_addr_t start, last;
PageDesc *p;
p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1161,9 +1161,9 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
}
start = addr & TARGET_PAGE_MASK;
- end = start + TARGET_PAGE_SIZE;
- pages = page_collection_lock(start, end);
- tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+ last = addr | ~TARGET_PAGE_MASK;
+ pages = page_collection_lock(start, last);
+ tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
page_collection_unlock(pages);
}
@@ -1179,7 +1179,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
struct page_collection *pages;
tb_page_addr_t next;
- pages = page_collection_lock(start, end);
+ pages = page_collection_lock(start, end - 1);
for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
start < end;
start = next, next += TARGET_PAGE_SIZE) {
@@ -1224,7 +1224,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
{
struct page_collection *pages;
- pages = page_collection_lock(ram_addr, ram_addr + size);
+ pages = page_collection_lock(ram_addr, ram_addr + size - 1);
tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
page_collection_unlock(pages);
}
Pass the address of the last byte to be changed, rather than the first
address past the last byte.  This avoids overflow when the last page of
the address space is involved.

Fixes a bug in the loop comparison where "<= end" would lock one more
page than required.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/tb-maint.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
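
For illustration, a self-contained sketch of the overflow this patch
avoids. It uses hypothetical stand-ins for tb_page_addr_t and the
TARGET_PAGE_* definitions (a 32-bit address space with 4K pages); it is
not code from the tree:

  #include <assert.h>
  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for QEMU's target page definitions. */
  typedef uint32_t tb_page_addr_t;   /* assume a 32-bit guest space */
  #define TARGET_PAGE_BITS 12
  #define TARGET_PAGE_SIZE ((tb_page_addr_t)1 << TARGET_PAGE_BITS)
  #define TARGET_PAGE_MASK ((tb_page_addr_t)-1 << TARGET_PAGE_BITS)

  int main(void)
  {
      /* An address inside the last page of the address space. */
      tb_page_addr_t addr = 0xfffff123;
      tb_page_addr_t start = addr & TARGET_PAGE_MASK;  /* 0xfffff000 */

      /* Old convention: one past the last byte wraps to zero, so a
       * "start < end" loop never runs and "start <= end" fails. */
      tb_page_addr_t end = start + TARGET_PAGE_SIZE;
      printf("end  = 0x%08" PRIx32 " (wrapped)\n", end);

      /* New convention: the last byte itself cannot overflow. */
      tb_page_addr_t last = addr | ~TARGET_PAGE_MASK;  /* 0xffffffff */
      printf("last = 0x%08" PRIx32 " (valid)\n", last);
      assert(start <= last);                           /* always holds */
      return 0;
  }

With the inclusive convention, callers convert an exclusive bound with
"end - 1" and round an address up to the last byte of its page with
"addr | ~TARGET_PAGE_MASK", as the hunks above do.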