@@ -435,6 +435,14 @@ void tb_lock(void);
 void tb_unlock(void);
 void tb_lock_reset(void);
 
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void)
+{
+}
+#endif
+
 #if !defined(CONFIG_USER_ONLY)
 
 struct MemoryRegion *iotlb_to_region(CPUState *cpu,
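
Note: the header hunk keeps the real declaration behind !CONFIG_USER_ONLY && CONFIG_DEBUG_TCG and gives every other configuration an empty static inline, so call sites never need #ifdefs of their own. A minimal stand-alone sketch of that declare-or-stub pattern, assuming illustrative names (DEBUG_CHECKS and check_invariants are not QEMU symbols):

    #include <stdio.h>

    #ifdef DEBUG_CHECKS
    void check_invariants(void);        /* real checker defined in a .c file */
    #else
    static inline void check_invariants(void)
    {
        /* empty stub: the call compiles away in non-debug builds */
    }
    #endif

    int main(void)
    {
        check_invariants();             /* no #ifdef needed at the call site */
        printf("ok\n");
        return 0;
    }

Built without -DDEBUG_CHECKS this is self-contained; with it, the linker expects a real definition, just as the declaration above expects the one added two hunks below.
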
@@ -273,6 +273,7 @@ void cpu_exec_step_atomic(CPUState *cpu)
         tcg_debug_assert(!have_mmap_lock());
 #endif
         tb_lock_reset();
+        assert_no_pages_locked();
     }
 
     if (in_exclusive_region) {
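
Note: in cpu_exec_step_atomic() the new call sits on the cleanup path taken after the translation/execution attempt bails out via the sigsetjmp error branch; tb_lock_reset() force-drops the coarse tb lock, while assert_no_pages_locked() only checks that no fine-grained page lock leaked across the bail-out. A rough sketch of that shape, assuming illustrative names (big_lock, page_locks and risky_work are not QEMU code):

    #include <assert.h>
    #include <setjmp.h>
    #include <stdio.h>

    static jmp_buf bail;
    static int big_lock;     /* force-reset on the error path, like tb_lock */
    static int page_locks;   /* must already be clean; only asserted        */

    static void risky_work(void)
    {
        big_lock = 1;        /* take the coarse lock ...        */
        longjmp(bail, 1);    /* ... then bail out mid-operation */
    }

    int main(void)
    {
        if (setjmp(bail) == 0) {
            risky_work();
        } else {
            big_lock = 0;                /* tb_lock_reset() analogue          */
            assert(page_locks == 0);     /* assert_no_pages_locked() analogue */
            puts("recovered; no fine-grained locks leaked");
        }
        return 0;
    }
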
@@ -658,6 +658,12 @@ do_assert_page_locked(const PageDesc *pd, const char *file, int line)
 
 #define assert_page_locked(pd) do_assert_page_locked(pd, __FILE__, __LINE__)
 
+void assert_no_pages_locked(void)
+{
+    ht_pages_locked_debug_init();
+    g_assert(g_hash_table_size(ht_pages_locked_debug) == 0);
+}
+
 #else /* !CONFIG_DEBUG_TCG */
 
 #define assert_page_locked(pd)
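
Note: this hunk supplies the CONFIG_DEBUG_TCG implementation: the debug build keeps a per-thread hash table of the pages the current thread has locked (lazily created by ht_pages_locked_debug_init()), so asserting that no pages are locked reduces to checking that the table is empty. A compilable GLib sketch of that bookkeeping, assuming illustrative names (page_lock_dbg() and friends are not QEMU functions):

    #include <glib.h>

    static GHashTable *pages_locked;     /* QEMU's table is per-thread */

    static void pages_locked_init(void)
    {
        if (pages_locked == NULL) {
            pages_locked = g_hash_table_new(g_direct_hash, g_direct_equal);
        }
    }

    static void page_lock_dbg(void *pd)      /* record a lock */
    {
        pages_locked_init();
        g_hash_table_add(pages_locked, pd);
    }

    static void page_unlock_dbg(void *pd)    /* record an unlock */
    {
        pages_locked_init();
        g_hash_table_remove(pages_locked, pd);
    }

    static void check_no_pages_locked(void)  /* the assertion itself */
    {
        pages_locked_init();
        g_assert(g_hash_table_size(pages_locked) == 0);
    }

    int main(void)
    {
        int page;                            /* stands in for a PageDesc */

        page_lock_dbg(&page);
        page_unlock_dbg(&page);
        check_no_pages_locked();             /* passes: everything was released */
        return 0;
    }

(builds with: cc sketch.c $(pkg-config --cflags --libs glib-2.0))
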
@@ -829,6 +835,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
     set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
     set->max = NULL;
+    assert_no_pages_locked();
 
  retry:
     g_tree_foreach(set->tree, page_entry_lock, NULL);
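
Note: page_collection_lock() is about to take a whole group of page locks (walking set->tree, i.e. in page-address order), and that is only deadlock-safe if every thread starts from a clean slate and acquires the locks in the same fixed order; hence the assertion before the retry loop. A stand-alone sketch of that discipline, assuming illustrative names (lock_collection() and friends are not QEMU code):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NLOCKS 4

    static pthread_mutex_t locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };
    static int held;                          /* per-thread in real code */

    static int cmp_int(const void *a, const void *b)
    {
        return *(const int *)a - *(const int *)b;
    }

    static void lock_collection(int *idx, int n)
    {
        assert(held == 0);                    /* the assert_no_pages_locked() idea */
        qsort(idx, n, sizeof(idx[0]), cmp_int);   /* fixed (ascending) order */
        for (int i = 0; i < n; i++) {
            pthread_mutex_lock(&locks[idx[i]]);
            held++;
        }
    }

    static void unlock_collection(const int *idx, int n)
    {
        while (n-- > 0) {
            pthread_mutex_unlock(&locks[idx[n]]);
            held--;
        }
    }

    int main(void)
    {
        int want[] = { 3, 1, 2 };             /* requested out of order */

        lock_collection(want, 3);
        unlock_collection(want, 3);
        printf("collection locked and released\n");
        return 0;
    }

(link with -pthread)
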