--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -220,6 +220,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->uw_runnable_load_avg);
SEQ_printf(m, " .%-30s: %ld\n", "blocked_load_avg",
cfs_rq->blocked_load_avg);
+ SEQ_printf(m, " .%-30s: %ld\n", "uw_blocked_load_avg",
+ cfs_rq->uw_blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_contrib",
cfs_rq->tg_load_contrib);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2481,12 +2481,18 @@ static long __update_entity_load_avg_contrib(struct sched_entity *se,
}
static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
- long load_contrib)
+ long load_contrib,
+ long uw_load_contrib)
{
if (likely(load_contrib < cfs_rq->blocked_load_avg))
cfs_rq->blocked_load_avg -= load_contrib;
else
cfs_rq->blocked_load_avg = 0;
+
+ if (likely(uw_load_contrib < cfs_rq->uw_blocked_load_avg))
+ cfs_rq->uw_blocked_load_avg -= uw_load_contrib;
+ else
+ cfs_rq->uw_blocked_load_avg = 0;
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
@@ -2521,7 +2527,8 @@ static inline void update_entity_load_avg(struct sched_entity *se,
cfs_rq->uw_runnable_load_avg += uw_contrib_delta;
}
else
- subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+ subtract_blocked_load_contrib(cfs_rq, -contrib_delta,
+ -uw_contrib_delta);
}
/*
@@ -2540,12 +2547,14 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
if (atomic_long_read(&cfs_rq->removed_load)) {
unsigned long removed_load;
removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
- subtract_blocked_load_contrib(cfs_rq, removed_load);
+ subtract_blocked_load_contrib(cfs_rq, removed_load, 0);
}
if (decays) {
cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
decays);
+ cfs_rq->uw_blocked_load_avg =
+ decay_load(cfs_rq->uw_blocked_load_avg, decays);
atomic64_add(decays, &cfs_rq->decay_counter);
cfs_rq->last_decay = now;
}
@@ -2591,7 +2600,8 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
/* migrated tasks did not contribute to our blocked load */
if (wakeup) {
- subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+ subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+ se->avg.uw_load_avg_contrib);
update_entity_load_avg(se, 0);
}
@@ -2620,6 +2630,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
if (sleep) {
cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+ cfs_rq->uw_blocked_load_avg += se->avg.uw_load_avg_contrib;
se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
} /* migrations, e.g. sleep=0 leave decay_count == 0 */
}
@@ -7481,7 +7492,8 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
*/
if (se->avg.decay_count) {
__synchronize_entity_decay(se);
- subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+ subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib,
+ se->avg.uw_load_avg_contrib);
}
#endif
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -337,7 +337,7 @@ struct cfs_rq {
* the FAIR_GROUP_SCHED case).
*/
unsigned long runnable_load_avg, blocked_load_avg;
- unsigned long uw_runnable_load_avg;
+ unsigned long uw_runnable_load_avg, uw_blocked_load_avg;
atomic64_t decay_counter;
u64 last_decay;
atomic_long_t removed_load;
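
For readers following the bookkeeping above, the sketch below models the new unweighted counter in plain userspace C. It is an illustration only, not kernel code: the struct, toy_decay_load() (which merely halves the value once per 32 decay periods instead of applying the kernel's y^n series, y^32 = 0.5) and main() are invented for this example. Only the structure matters: uw_blocked_load_avg is charged, decayed and clamp-subtracted at exactly the same points as blocked_load_avg, which is all the patch adds.

#include <stdio.h>

/* Toy stand-in for the two sums the patch keeps in step. */
struct toy_cfs_rq {
	unsigned long blocked_load_avg;     /* weighted by task priority */
	unsigned long uw_blocked_load_avg;  /* unweighted (nice-0 scale) */
};

/*
 * Crude stand-in for decay_load(): only the halving per 32 periods is
 * modelled here, to keep the example short.
 */
static unsigned long toy_decay_load(unsigned long val, unsigned int periods)
{
	return val >> (periods / 32);
}

/* Mirrors subtract_blocked_load_contrib() after the patch: clamp both sums. */
static void toy_subtract_blocked(struct toy_cfs_rq *cfs_rq,
				 long load, long uw_load)
{
	if (load < (long)cfs_rq->blocked_load_avg)
		cfs_rq->blocked_load_avg -= load;
	else
		cfs_rq->blocked_load_avg = 0;

	if (uw_load < (long)cfs_rq->uw_blocked_load_avg)
		cfs_rq->uw_blocked_load_avg -= uw_load;
	else
		cfs_rq->uw_blocked_load_avg = 0;
}

int main(void)
{
	struct toy_cfs_rq rq = { 0, 0 };

	/* Task sleeps: both contributions move to the blocked sums. */
	rq.blocked_load_avg += 2048;	/* weighted contribution */
	rq.uw_blocked_load_avg += 1024;	/* same task, nice-0 scale */

	/* 64 decay periods pass: both sums decay in lockstep. */
	rq.blocked_load_avg = toy_decay_load(rq.blocked_load_avg, 64);
	rq.uw_blocked_load_avg = toy_decay_load(rq.uw_blocked_load_avg, 64);

	/*
	 * Task wakes up: the weighted subtraction fits (512 - 500 = 12),
	 * the unweighted one would underflow and is clamped to 0.
	 */
	toy_subtract_blocked(&rq, 500, 300);

	printf("blocked=%lu uw_blocked=%lu\n",
	       rq.blocked_load_avg, rq.uw_blocked_load_avg);
	return 0;
}

The weighted sum continues to feed the existing load-balancing paths unchanged; the new unweighted sum simply gives a priority-independent view of the blocked load carried by the cfs_rq, maintained by the same enqueue, dequeue, decay and removal hooks shown in the hunks above.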