@@ -511,20 +511,20 @@ static gint tb_page_addr_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
 }
 
 /*
- * Lock a range of pages ([@start,@end[) as well as the pages of all
+ * Lock a range of pages ([@start,@last]) as well as the pages of all
  * intersecting TBs.
  * Locking order: acquire locks in ascending order of page index.
  */
 static struct page_collection *page_collection_lock(tb_page_addr_t start,
-                                                    tb_page_addr_t end)
+                                                    tb_page_addr_t last)
 {
     struct page_collection *set = g_malloc(sizeof(*set));
     tb_page_addr_t index;
     PageDesc *pd;
 
     start >>= TARGET_PAGE_BITS;
-    end >>= TARGET_PAGE_BITS;
-    g_assert(start <= end);
+    last >>= TARGET_PAGE_BITS;
+    g_assert(start <= last);
 
     set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
@@ -534,7 +534,7 @@ static struct page_collection *page_collection_lock(tb_page_addr_t start,
  retry:
     q_tree_foreach(set->tree, page_entry_lock, NULL);
 
-    for (index = start; index <= end; index++) {
+    for (index = start; index <= last; index++) {
         TranslationBlock *tb;
         PageForEachNext n;
 
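The two hunks above change page_collection_lock() from an exclusive end to an inclusive last bound, and the loop from "index <= end" to "index <= last" once both addresses have been shifted down to page indexes. The point of the inclusive form is that a range ending on the final page of the address space stays representable; with an exclusive bound, "one past the last byte" wraps to zero. The following standalone sketch is not QEMU code: a 32-bit address type and 4 KiB pages are assumed purely for illustration, but it mirrors the shape of the new loop and shows the wrap-around that the old interface invites.

/* Minimal sketch of the inclusive-bound iteration; not QEMU code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12                    /* assumed page size: 4 KiB */

typedef uint32_t addr_t;                /* stand-in for tb_page_addr_t */

/* Count pages covered by [start, last]; same shape as the new loop:
 * shift both bounds, then iterate with an inclusive comparison. */
static unsigned pages_in_range(addr_t start, addr_t last)
{
    unsigned n = 0;

    for (addr_t idx = start >> PAGE_BITS; idx <= (last >> PAGE_BITS); idx++) {
        n++;
    }
    return n;
}

int main(void)
{
    addr_t start = 0xfffff000u;         /* last page of a 32-bit space */
    addr_t last  = 0xffffffffu;

    /* Inclusive bound: the range is representable and covers one page. */
    printf("pages: %u\n", pages_in_range(start, last));

    /* Exclusive bound: "one past the last byte" wraps to 0, so a check
     * such as g_assert(start <= end) fires and a loop bounded by
     * "index <= end" never reaches the intended page. */
    printf("end = last + 1 wraps to 0x%08x\n", (addr_t)(last + 1));
    return 0;
}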
@@ -1154,7 +1154,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     struct page_collection *pages;
-    tb_page_addr_t start, end;
+    tb_page_addr_t start, last;
     PageDesc *p;
 
     p = page_find(addr >> TARGET_PAGE_BITS);
@@ -1163,9 +1163,9 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
     }
 
     start = addr & TARGET_PAGE_MASK;
-    end = start + TARGET_PAGE_SIZE;
-    pages = page_collection_lock(start, end);
-    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
+    last = addr | ~TARGET_PAGE_MASK;
+    pages = page_collection_lock(start, last);
+    tb_invalidate_phys_page_range__locked(pages, p, start, last + 1, 0);
     page_collection_unlock(pages);
 }
 
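In tb_invalidate_phys_page() above, the last byte of the page containing addr is now computed as "addr | ~TARGET_PAGE_MASK" rather than via an addition, and "last + 1" is still handed on to tb_invalidate_phys_page_range__locked(), which at this point keeps its exclusive-end parameter. Below is a small sketch of the bit manipulation under the same illustrative assumptions as before (32-bit addresses, 4 KiB pages); it is not QEMU code.

/* Sketch of the page-bound arithmetic; not QEMU code. */
#include <assert.h>
#include <stdint.h>

#define PAGE_BITS 12
#define PAGE_SIZE ((uint32_t)1 << PAGE_BITS)
#define PAGE_MASK (~(PAGE_SIZE - 1))    /* 0xfffff000 for 4 KiB pages */

int main(void)
{
    uint32_t addr  = 0xfffff123u;       /* a byte inside the final page */

    uint32_t start = addr & PAGE_MASK;  /* first byte of the page: 0xfffff000 */
    uint32_t last  = addr | ~PAGE_MASK; /* last byte of the page:  0xffffffff */

    /* ORing in the low bits gives the same value as start + PAGE_SIZE - 1. */
    assert(last == (uint32_t)(start + PAGE_SIZE - 1));

    /* The old exclusive bound, start + PAGE_SIZE, wraps to 0 on this page,
     * which is exactly the case the inclusive interface avoids. */
    assert((uint32_t)(start + PAGE_SIZE) == 0);
    return 0;
}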
@@ -1181,7 +1181,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
     struct page_collection *pages;
     tb_page_addr_t next;
 
-    pages = page_collection_lock(start, end);
+    pages = page_collection_lock(start, end - 1);
     for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
          start < end;
          start = next, next += TARGET_PAGE_SIZE) {
@@ -1226,7 +1226,7 @@ void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
 {
     struct page_collection *pages;
 
-    pages = page_collection_lock(ram_addr, ram_addr + size);
+    pages = page_collection_lock(ram_addr, ram_addr + size - 1);
     tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
     page_collection_unlock(pages);
 }
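The remaining callers simply translate the bounds they already have: tb_invalidate_phys_range() still receives an exclusive end from its own callers and passes "end - 1", while tb_invalidate_phys_range_fast() turns a base and size into "ram_addr + size - 1". The sketch below (hypothetical helper names, not QEMU code) captures that call-site conversion; it assumes non-empty ranges, since size == 0 or end == start would yield last < start.

/* Sketch of the exclusive-to-inclusive call-site conversion; not QEMU code. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t addr_t;        /* stand-in for tb_page_addr_t / ram_addr_t */

/* [start, end) -> last byte, valid only for non-empty ranges. */
static addr_t end_to_last(addr_t start, addr_t end)
{
    assert(end > start);
    return end - 1;
}

/* (base, size) -> last byte, valid only for size != 0. */
static addr_t size_to_last(addr_t base, addr_t size)
{
    assert(size != 0);
    return base + size - 1;
}

int main(void)
{
    assert(end_to_last(0x1000, 0x3000) == 0x2fff);
    assert(size_to_last(0x1000, 0x2000) == 0x2fff);
    return 0;
}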