@@ -8586,6 +8586,14 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
se->depth = parent->depth + 1;
}
+ /*
+ * Set last_update_time to something different from 0 to make
+ * sure the 1st sched_entity will not be attached twice: once
+ * when attaching the task to the group and one more time when
+ * enqueueing the task.
+ */
+ tg->cfs_rq[cpu]->avg.last_update_time = rq_clock_task(rq_of(cfs_rq));
+
se->my_q = cfs_rq;
/* guarantee group entities always have weight */
update_load_set(&se->load, NICE_0_LOAD);
The cfs_rq->avg.last_update_time is initialized to 0, with the main effect that the 1st sched_entity that will be attached will keep its last_update_time set to 0 and will be attached once again during the enqueue. Initialize cfs_rq->avg.last_update_time to the current rq's clock instead. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> --- kernel/sched/fair.c | 8 ++++++++ 1 file changed, 8 insertions(+) -- 1.9.1