@@ -42,6 +42,9 @@ void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
                                           uintptr_t retaddr);
 struct page_collection *page_collection_lock(tb_page_addr_t start,
                                              tb_page_addr_t end);
+void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
+                                   unsigned size,
+                                   uintptr_t retaddr);
 void page_collection_unlock(struct page_collection *set);
 G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 #endif /* CONFIG_SOFTMMU */
@@ -1508,10 +1508,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
     trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
 
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
-        struct page_collection *pages
-            = page_collection_lock(ram_addr, ram_addr + size);
-        tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
-        page_collection_unlock(pages);
+        tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
     }
 
     /*
@@ -1184,10 +1184,6 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 }
 
 /*
- * len must be <= 8 and start must be a multiple of len.
- * Called via softmmu_template.h when code areas are written to with
- * iothread mutex not held.
- *
  * Call with all @pages in the range [@start, @start + len[ locked.
  */
 void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
@@ -1205,4 +1201,21 @@ void tb_invalidate_phys_page_fast__locked(struct page_collection *pages,
     tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                           retaddr);
 }
+
+/*
+ * @size must be <= 8 and @ram_addr must be a multiple of @size.
+ * Called from notdirty_write() when code areas are written to with
+ * the iothread mutex not held.
+ */
+void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
+                                   unsigned size,
+                                   uintptr_t retaddr)
+{
+    struct page_collection *pages;
+
+    pages = page_collection_lock(ram_addr, ram_addr + size);
+    tb_invalidate_phys_page_fast__locked(pages, ram_addr, size, retaddr);
+    page_collection_unlock(pages);
+}
+
#endif /* CONFIG_USER_ONLY */
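
For reference, a minimal standalone sketch of how the new entry point is
meant to be called. Nothing below is part of the patch: ram_addr_t is
re-typedef'd and tb_invalidate_phys_range_fast() is stubbed so the file
compiles outside the QEMU tree, and write_then_invalidate() is a
hypothetical caller standing in for notdirty_write().

#include <assert.h>
#include <stdint.h>

typedef uintptr_t ram_addr_t;           /* stand-in for QEMU's ram_addr_t */

/* Stub: the real helper locks the page_collection, invalidates, unlocks. */
static void tb_invalidate_phys_range_fast(ram_addr_t ram_addr, unsigned size,
                                          uintptr_t retaddr)
{
    (void)ram_addr; (void)size; (void)retaddr;
}

/*
 * After writing @size bytes at @ram_addr, drop any translated blocks
 * covering them.  The contract above requires size <= 8 and ram_addr
 * to be a multiple of size; both hold for the naturally aligned,
 * power-of-two sized stores this path is reached from.
 */
static void write_then_invalidate(ram_addr_t ram_addr, unsigned size,
                                  uintptr_t retaddr)
{
    assert(size != 0 && size <= 8 && ram_addr % size == 0);
    /* ... the store into guest RAM itself would happen here ... */
    tb_invalidate_phys_range_fast(ram_addr, size, retaddr);
}

int main(void)
{
    write_then_invalidate(0x1000, 4, 0);    /* aligned 4-byte write */
    return 0;
}

The sketch also shows what the refactor buys: callers such as
notdirty_write() get the lock/unlock pairing for free and no longer have
to name struct page_collection at all.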