@@ -1052,23 +1052,25 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
         cc->tcg_ops->initialize();
         tcg_target_initialized = true;
     }
-    tlb_init(cpu);
-    qemu_plugin_vcpu_init_hook(cpu);
+    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
+    tlb_init(cpu);
 #ifndef CONFIG_USER_ONLY
     tcg_iommu_init_notifier_list(cpu);
 #endif /* !CONFIG_USER_ONLY */
+    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
 }
 
 /* undo the initializations in reverse order */
 void tcg_exec_unrealizefn(CPUState *cpu)
 {
+    qemu_plugin_vcpu_exit_hook(cpu);
 #ifndef CONFIG_USER_ONLY
     tcg_iommu_free_notifier_list(cpu);
 #endif /* !CONFIG_USER_ONLY */
-    qemu_plugin_vcpu_exit_hook(cpu);
     tlb_destroy(cpu);
+    g_free(cpu->tb_jmp_cache);
 }
 
 #ifndef CONFIG_USER_ONLY
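Allocating with g_new0() here is what lets the flush path in the next hunk shed its lazy-allocation branch: a zero-filled CPUJumpCache is indistinguishable from a freshly flushed one, because a NULL tb pointer marks an empty entry. A minimal standalone sketch of that property, using GLib only; TB_JMP_CACHE_SIZE and the struct layout are simplified stand-ins, not QEMU's actual definitions:

    #include <glib.h>
    #include <assert.h>
    #include <stddef.h>

    #define TB_JMP_CACHE_SIZE 16   /* stand-in; QEMU's real size is a larger power of two */

    typedef struct TranslationBlock TranslationBlock;  /* opaque stand-in */

    typedef struct CPUJumpCache {
        struct {
            TranslationBlock *tb;
        } array[TB_JMP_CACHE_SIZE];
    } CPUJumpCache;

    int main(void)
    {
        CPUJumpCache *jc = g_new0(CPUJumpCache, 1);

        /* g_new0 zero-fills, so every entry starts NULL: flushed from birth. */
        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
            assert(jc->array[i].tb == NULL);
        }
        g_free(jc);
        return 0;
    }

Note also that unrealize now tears down in the exact reverse of realize: plugin exit hook first, the jump cache freed last.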
@@ -1580,15 +1580,13 @@ void tcg_flush_jmp_cache(CPUState *cpu)
 {
     CPUJumpCache *jc = cpu->tb_jmp_cache;
 
-    if (likely(jc)) {
-        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
-            qatomic_set(&jc->array[i].tb, NULL);
-        }
-    } else {
-        /* This should happen once during realize, and thus never race. */
-        jc = g_new0(CPUJumpCache, 1);
-        jc = qatomic_xchg(&cpu->tb_jmp_cache, jc);
-        assert(jc == NULL);
+    /* During early initialization, the cache may not yet be allocated. */
+    if (unlikely(jc == NULL)) {
+        return;
+    }
+
+    for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+        qatomic_set(&jc->array[i].tb, NULL);
     }
 }
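qatomic_set() is QEMU's relaxed atomic store, so a reader racing with the flush observes either the old tb or NULL, never a torn pointer, and no lock is needed. A rough C11 analogue of the new function, with atomic_store_explicit() standing in for qatomic_set() and an illustrative cache layout rather than QEMU's actual headers:

    #include <stdatomic.h>
    #include <stddef.h>

    #define TB_JMP_CACHE_SIZE 16   /* illustrative size */

    typedef struct TranslationBlock TranslationBlock;  /* opaque stand-in */

    typedef struct CPUJumpCache {
        struct {
            _Atomic(TranslationBlock *) tb;
        } array[TB_JMP_CACHE_SIZE];
    } CPUJumpCache;

    /* Mirror of the patched flush: bail out while the cache is still
     * unallocated, otherwise clear each entry with a relaxed store. */
    static void flush_jmp_cache(CPUJumpCache *jc)
    {
        if (jc == NULL) {
            return;
        }
        for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
            atomic_store_explicit(&jc->array[i].tb, NULL,
                                  memory_order_relaxed);
        }
    }

    int main(void)
    {
        CPUJumpCache jc = { 0 };
        flush_jmp_cache(&jc);   /* allocated: entries cleared */
        flush_jmp_cache(NULL);  /* early init: tolerated no-op */
        return 0;
    }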
@@ -134,15 +134,23 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
     /* cache the cpu class for the hotpath */
     cpu->cc = CPU_GET_CLASS(cpu);
 
-    cpu_list_add(cpu);
     if (!accel_cpu_realizefn(cpu, errp)) {
         return;
     }
+
     /* NB: errp parameter is unused currently */
     if (tcg_enabled()) {
         tcg_exec_realizefn(cpu, errp);
     }
+
+    /* Wait until cpu initialization complete before exposing cpu. */
+    cpu_list_add(cpu);
+
+    /* Plugin initialization must wait until cpu_index assigned. */
+    if (tcg_enabled()) {
+        qemu_plugin_vcpu_init_hook(cpu);
+    }
+
 #ifdef CONFIG_USER_ONLY
     assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
            qdev_get_vmsd(DEVICE(cpu))->unmigratable);
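Read together, the reordering makes cpu_list_add() the publication point: per the patch's own comments it both assigns cpu->cpu_index and exposes the cpu on the global list, so it must run after the TCG state exists but before the plugin hook that keys off cpu_index. Condensed from the hunk above (not a verbatim copy of the file, trailing user-only assertions elided), cpu_exec_realizefn() as it reads once the patch applies:

    void cpu_exec_realizefn(CPUState *cpu, Error **errp)
    {
        /* cache the cpu class for the hotpath */
        cpu->cc = CPU_GET_CLASS(cpu);

        if (!accel_cpu_realizefn(cpu, errp)) {
            return;
        }

        /* NB: errp parameter is unused currently */
        if (tcg_enabled()) {
            tcg_exec_realizefn(cpu, errp);   /* allocates tb_jmp_cache, TLB */
        }

        /* Wait until cpu initialization complete before exposing cpu. */
        cpu_list_add(cpu);                   /* assigns cpu_index, publishes cpu */

        /* Plugin initialization must wait until cpu_index assigned. */
        if (tcg_enabled()) {
            qemu_plugin_vcpu_init_hook(cpu);
        }

        /* ... CONFIG_USER_ONLY migration assertions follow ... */
    }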