@@ -7131,6 +7131,14 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 	return false;
 }
 
+static inline bool rt_rq_has_blocked(struct rt_rq *rt_rq)
+{
+	if (rt_rq->avg.util_avg)
+		return true;
+
+	return false;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7191,6 +7199,9 @@ static void update_blocked_averages(int cpu)
 			done = false;
 	}
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
+	/* Don't need periodic decay once load/util_avg are null */
+	if (rt_rq_has_blocked(&rq->rt))
+		done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
@@ -7259,7 +7270,7 @@ static inline void update_blocked_averages(int cpu)
 	update_rt_rq_load_avg(rq_clock_task(rq), cpu, &rq->rt, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (!cfs_rq_has_blocked(cfs_rq))
+	if (!cfs_rq_has_blocked(cfs_rq) && !rt_rq_has_blocked(&rq->rt))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);
Take into account rt's utilization when deciding to stop the periodic update of blocked load.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 kernel/sched/fair.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

--
2.7.4
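
For reference, here is a minimal userspace sketch of the decision this patch changes: periodic decay of blocked PELT signals may only be stopped once both the cfs and the rt contributions have reached zero. The struct definitions below are simplified stand-ins for the kernel ones, and update_blocked_done() is a hypothetical helper named only for illustration; it is not a function in fair.c.

/*
 * Minimal userspace sketch, not kernel code: it models the "can we stop
 * the periodic blocked-load update?" check after this patch. The structs
 * are simplified stand-ins and update_blocked_done() is a hypothetical
 * helper used only for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_avg { unsigned long load_avg, util_avg; };
struct cfs_rq { struct sched_avg avg; };
struct rt_rq { struct sched_avg avg; };
struct rq {
	struct cfs_rq cfs;
	struct rt_rq rt;
	bool has_blocked_load;
};

/* cfs still carries blocked contribution if either signal is non-zero */
static bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
{
	return cfs_rq->avg.load_avg || cfs_rq->avg.util_avg;
}

/* rt tracks utilization only, so that is the only signal to check */
static bool rt_rq_has_blocked(struct rt_rq *rt_rq)
{
	return rt_rq->avg.util_avg != 0;
}

/*
 * Mirrors the end of update_blocked_averages(): before the patch only
 * the cfs side was checked, now rt utilization also keeps the periodic
 * update alive until it has fully decayed.
 */
static void update_blocked_done(struct rq *rq)
{
	if (!cfs_rq_has_blocked(&rq->cfs) && !rt_rq_has_blocked(&rq->rt))
		rq->has_blocked_load = false;
}

int main(void)
{
	/* cfs fully decayed, but an rt task ran recently: keep updating */
	struct rq rq = {
		.cfs = { .avg = { 0, 0 } },
		.rt = { .avg = { .util_avg = 137 } },
		.has_blocked_load = true,
	};

	update_blocked_done(&rq);
	printf("has_blocked_load = %d\n", rq.has_blocked_load);	/* prints 1 */
	return 0;
}

With a non-zero rt utilization, has_blocked_load stays set, so the NO_HZ path keeps scheduling update_blocked_averages() until the rt signal has decayed to zero as well.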