Track task wakeup rate in wakeup_avg_sum by counting wakeups. Note that
this is _not_ cpu wakeups (idle exits). Task wakeups only cause cpu
wakeups if the cpu is idle when the task wakeup occurs. The wakeup rate
decays over time at the same rate as used for the existing entity load
tracking. Unlike runnable_avg_sum, wakeup_avg_sum is counting events,
not time, and is therefore theoretically unbounded and should be used
with care.

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 include/linux/sched.h |  3 +++
 kernel/sched/fair.c   | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1109,6 +1109,9 @@ struct sched_avg {
 	u64 last_runnable_update;
 	s64 decay_count;
 	unsigned long load_avg_contrib;
+
+	unsigned long last_wakeup_update;
+	u32 wakeup_avg_sum;
 };
 
 #ifdef CONFIG_SCHEDSTATS

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -679,6 +679,8 @@ void init_task_runnable_average(struct task_struct *p)
 	p->se.avg.runnable_avg_sum = slice;
 	p->se.avg.runnable_avg_period = slice;
 	__update_task_entity_contrib(&p->se);
+
+	p->se.avg.last_wakeup_update = jiffies;
 }
 #else
 void init_task_runnable_average(struct task_struct *p)
@@ -4025,6 +4027,21 @@ static void record_wakee(struct task_struct *p)
 	}
 }
 
+static void update_wakeup_avg(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct sched_avg *sa = &se->avg;
+	unsigned long now = ACCESS_ONCE(jiffies);
+
+	if (time_after(now, sa->last_wakeup_update)) {
+		sa->wakeup_avg_sum = decay_load(sa->wakeup_avg_sum,
+				jiffies_to_msecs(now - sa->last_wakeup_update));
+		sa->last_wakeup_update = now;
+	}
+
+	sa->wakeup_avg_sum += 1024;
+}
+
 static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
@@ -4045,6 +4062,7 @@ static void task_waking_fair(struct task_struct *p)
 
 	se->vruntime -= min_vruntime;
 	record_wakee(p);
+	update_wakeup_avg(p);
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
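
For illustration only (not part of the patch): update_wakeup_avg() builds
a geometric series. On each wakeup the old sum is first scaled by y^n via
decay_load(), where y^32 = 1/2 and n is the number of ~1ms periods since
the last update, and 1024 is then added. Below is a minimal userspace
sketch of the same rule, approximating the kernel's fixed-point
decay_load() with pow(); decay_wakeup_sum() and the 10ms wakeup period
are invented for the example:

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Userspace stand-in for the kernel's decay_load(): scale by y^n, y^32 = 1/2. */
static uint32_t decay_wakeup_sum(uint32_t sum, unsigned int n_periods)
{
	return (uint32_t)(sum * pow(0.5, n_periods / 32.0));
}

int main(void)
{
	uint32_t wakeup_avg_sum = 0;
	unsigned int period_ms = 10;	/* assumed: one wakeup every 10ms */
	int i;

	for (i = 0; i < 500; i++) {
		/* What update_wakeup_avg() does per wakeup: decay, then add 1024. */
		wakeup_avg_sum = decay_wakeup_sum(wakeup_avg_sum, period_ms);
		wakeup_avg_sum += 1024;
	}

	/* Converges to 1024 / (1 - y^10), roughly 5250 for this rate. */
	printf("steady-state wakeup_avg_sum: %u\n", wakeup_avg_sum);
	return 0;
}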
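
A note on the "theoretically unbounded" caveat, since it constrains how
wakeup_avg_sum may be used: the series settles at roughly
1024 / (1 - y^p) for a task waking every p ms, i.e. about 5250 for
p = 10 but already about 47800 for p = 1, and several wakeups landing
inside the same decay period simply stack up undecayed. Unlike
runnable_avg_sum, which is time-based and hence bounded by
1024 / (1 - y) (the kernel's LOAD_AVG_MAX), nothing caps this sum as the
wakeup rate grows.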