@@ -909,64 +909,10 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
/* main execution loop */
-int cpu_exec(CPUState *cpu)
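+/*
+ * Keeping the loop body out of line means that none of its local
+ * variables are live across the sigsetjmp() in the caller, so the
+ * compiler need not consider them clobbered by a siglongjmp().
+ */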
+static int __attribute__((noinline))
+cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
 {
     int ret;
-    SyncClocks sc = { 0 };
-
-    /* replay_interrupt may need current_cpu */
-    current_cpu = cpu;
-
-    if (cpu_handle_halt(cpu)) {
-        return EXCP_HALTED;
-    }
-
-    rcu_read_lock();
-
-    cpu_exec_enter(cpu);
-
-    /* Calculate difference between guest clock and host clock.
-     * This delay includes the delay of the last cycle, so
-     * what we have to do is sleep until it is 0. As for the
-     * advance/delay we gain here, we try to fix it next time.
-     */
-    init_delay_params(&sc, cpu);
-
-    /* prepare setjmp context for exception handling */
-    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
-#if defined(__clang__)
-        /*
-         * Some compilers wrongly smash all local variables after
-         * siglongjmp (the spec requires that only non-volatile locals
-         * which are changed between the sigsetjmp and siglongjmp are
-         * permitted to be trashed). There were bug reports for gcc
-         * 4.5.0 and clang. The bug is fixed in all versions of gcc
-         * that we support, but is still unfixed in clang:
-         * https://bugs.llvm.org/show_bug.cgi?id=21183
-         *
-         * Reload an essential local variable here for those compilers.
-         * Newer versions of gcc would complain about this code (-Wclobbered),
-         * so we only perform the workaround for clang.
-         */
-        cpu = current_cpu;
-#else
-        /* Non-buggy compilers preserve this; assert the correct value. */
-        g_assert(cpu == current_cpu);
-#endif
-
-#ifndef CONFIG_SOFTMMU
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-        qemu_plugin_disable_mem_helpers(cpu);
-
-        assert_no_pages_locked();
-    }
 
     /* if an exception is pending, we execute it here */
     while (!cpu_handle_exception(cpu, &ret)) {
@@ -1033,9 +979,60 @@ int cpu_exec(CPUState *cpu)
             /* Try to align the host and virtual clocks
                if the guest is in advance */
-            align_clocks(&sc, cpu);
+            align_clocks(sc, cpu);
         }
     }
+    return ret;
+}
+
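+/*
+ * After a siglongjmp() from translated code, only the parameters cpu and
+ * sc are live in this frame; the cleanup below runs and cpu_exec_loop()
+ * is then entered again from the top.
+ */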
+static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
+{
+    /* Prepare setjmp context for exception handling. */
+    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
+        /* Non-buggy compilers preserve this; assert the correct value. */
+        g_assert(cpu == current_cpu);
+
+#ifndef CONFIG_SOFTMMU
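+        /*
+         * The longjmp may have bypassed the code that normally resets
+         * the helper return address and releases the mmap lock, so
+         * clean both up here.
+         */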
+        clear_helper_retaddr();
+        if (have_mmap_lock()) {
+            mmap_unlock();
+        }
+#endif
+        if (qemu_mutex_iothread_locked()) {
+            qemu_mutex_unlock_iothread();
+        }
+        qemu_plugin_disable_mem_helpers(cpu);
+
+        assert_no_pages_locked();
+    }
+
+    return cpu_exec_loop(cpu, sc);
+}
+
+int cpu_exec(CPUState *cpu)
+{
+    int ret;
+    SyncClocks sc = { 0 };
+
+    /* replay_interrupt may need current_cpu */
+    current_cpu = cpu;
+
+    if (cpu_handle_halt(cpu)) {
+        return EXCP_HALTED;
+    }
+
+    rcu_read_lock();
+    cpu_exec_enter(cpu);
+
+    /*
+     * Calculate the difference between the guest clock and the host
+     * clock.  This delay includes the delay of the last cycle, so all
+     * we need to do is sleep until it is 0.  Whatever advance/delay we
+     * gain here, we try to fix on the next iteration.
+     */
+    init_delay_params(&sc, cpu);
+
+    ret = cpu_exec_setjmp(cpu, &sc);
 
     cpu_exec_exit(cpu);
     rcu_read_unlock();