@@ -37,9 +37,9 @@ void page_table_config_init(void);
#ifdef CONFIG_SOFTMMU
struct page_collection;
-void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len,
-                                  uintptr_t retaddr);
+void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
+                                          tb_page_addr_t start, int len,
+                                          uintptr_t retaddr);
struct page_collection *page_collection_lock(tb_page_addr_t start,
                                             tb_page_addr_t end);
void page_collection_unlock(struct page_collection *set);
@@ -1510,7 +1510,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
    struct page_collection *pages
        = page_collection_lock(ram_addr, ram_addr + size);
-    tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
+    tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
    page_collection_unlock(pages);
}
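
The __locked suffix makes the precondition explicit: the caller must already
hold the page locks, taken via page_collection_lock(), as notdirty_write()
does above. For illustration only, a convenience wrapper that takes and drops
the locks itself could look like the sketch below; the wrapper name is
hypothetical and not part of this patch, but every call it makes appears in
the hunks shown here.

    /* Hypothetical unlocked wrapper; not introduced by this patch. */
    static void tb_invalidate_phys_range_fast(tb_page_addr_t start, int len,
                                              uintptr_t retaddr)
    {
        /* Lock every page covering [start, start + len). */
        struct page_collection *pages
            = page_collection_lock(start, start + len);

        tb_invalidate_phys_page_fast__locked(pages, start, len, retaddr);
        page_collection_unlock(pages);
    }
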
@@ -1190,9 +1190,9 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
-void tb_invalidate_phys_page_fast(struct page_collection *pages,
-                                  tb_page_addr_t start, int len,
-                                  uintptr_t retaddr)
+void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
+                                          tb_page_addr_t start, int len,
+                                          uintptr_t retaddr)
{
    PageDesc *p;