@@ -2563,23 +2563,33 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
const struct sched_class *class;
struct task_struct *p;
+again:
+ if (likely(rq->nr_running)) {
+ /*
+ * Optimization: we know that if all tasks are in
+ * the fair class we can call that function directly:
+ */
+ if (likely(rq->nr_running == rq->cfs.h_nr_running))
+ return fair_sched_class.pick_next_task(rq, prev);
+
+ for_each_class(class) {
+ p = class->pick_next_task(rq, prev);
+ if (p)
+ return p;
+ }
+ }
+
/*
- * Optimization: we know that if all tasks are in
- * the fair class we can call that function directly:
+ * If a task was pulled onto this CPU by the balance pass, go back
+ * and pick the next task; otherwise fall back to the optimization
+ * of picking the idle task directly.
*/
- if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
- p = fair_sched_class.pick_next_task(rq, prev);
- if (likely(p))
- return p;
- }
+ if (idle_balance(rq))
+ goto again;
- for_each_class(class) {
- p = class->pick_next_task(rq, prev);
- if (p)
- return p;
- }
+ rq->idle_stamp = rq_clock(rq);
- BUG(); /* the idle class will always have a runnable task */
+ return idle_sched_class.pick_next_task(rq, prev);
}
/*
@@ -2672,9 +2682,6 @@ need_resched:
pre_schedule(rq, prev);
- if (unlikely(!rq->nr_running))
- rq->idle_stamp = idle_balance(rq) ? 0 : rq_clock(rq);
-
if (prev->on_rq || rq->skip_clock_update < 0)
update_rq_clock(rq);