===================================================================
@@ -86,15 +86,34 @@ static int cpuidle_idle_call(void)
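+ /*
+ * In poll mode, or when the tick broadcast device expired for
+ * this cpu, spin with the interrupts enabled instead of going
+ * into a sleep state
+ */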
if (cpu_idle_force_poll || tick_check_broadcast_expired())
return cpu_idle_poll();
+ /*
+ * Check if the idle task must be rescheduled. If so, exit the
+ * function after re-enabling the local irq and setting the
+ * polling flag again
+ */
if (current_clr_polling_and_test()) {
local_irq_enable();
__current_set_polling();
return 0;
}
+ /*
+ * During the idle period, stop measuring the latencies of
+ * the irqs-off critical sections
+ */
stop_critical_timings();
+
+ /*
+ * Tell the RCU framework we are entering an idle section, so
+ * no more RCU read-side critical sections will follow and the
+ * grace period moves one step closer to completion
+ */
rcu_idle_enter();
+ /*
+ * Check if the cpuidle framework is ready, otherwise fall back
+ * to the default arch-specific idle method
+ */
next_state = cpuidle_enabled(drv, dev);
if (next_state < 0) {
arch_cpu_idle();
@@ -102,13 +121,16 @@ static int cpuidle_idle_call(void)
}
/*
- * Ask the governor for the next state, this call can fail for
- * different reasons: cpuidle is not enabled or an idle state
- * fulfilling the constraints was not found. In this case, we
- * fall back to the default idle function
+ * Ask the governor to choose an idle state it thinks is
+ * suitable to enter. At this point there is *always* a
+ * suitable idle state to choose from
*/
next_state = cpuidle_select(drv, dev);
+ /*
+ * The idle task must be rescheduled, so it is pointless to enter
+ * idle: record a zero idle residency and get out of this function
+ */
if (need_resched()) {
dev->last_residency = 0;
/* give the governor an opportunity to reflect on the outcome */
@@ -119,6 +141,12 @@ static int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu);
+ /*
+ * Enter the idle state that was previously chosen by the
+ * governor. This function will block until an interrupt
+ * occurs and will take care of re-enabling the local
+ * interrupts
+ */
entered_state = cpuidle_enter(drv, dev, next_state);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -155,6 +183,10 @@ static void cpu_idle_loop(void)
local_irq_disable();
arch_cpu_idle_enter();
+ /*
+ * It is up to the underlying functions to
+ * enable the local interrupts again
+ */
cpuidle_idle_call();
arch_cpu_idle_exit();