--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/tick.h>
+#include <linux/sched.h>
#include <trace/events/power.h>
/**
@@ -315,6 +316,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %lu - CPU: %lu\n",
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
+ set_curr_capacity(freqs->cpu, (freqs->new*1024)/policy->max);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3068,4 +3068,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+void set_curr_capacity(int cpu, long capacity);
+
#endif
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7410,9 +7410,15 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
#ifdef CONFIG_SMP
atomic64_set(&cfs_rq->decay_counter, 1);
atomic_long_set(&cfs_rq->removed_load, 0);
+ atomic_long_set(&cfs_rq->curr_capacity, 1024);
#endif
}
+void set_curr_capacity(int cpu, long capacity)
+{
+ atomic_long_set(&cpu_rq(cpu)->cfs.curr_capacity, capacity);
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -341,6 +341,8 @@ struct cfs_rq {
u64 last_decay;
atomic_long_t removed_load;
+ atomic_long_t curr_capacity;
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Required to track per-cpu representation of a task_group */
u32 tg_runnable_contrib;
The scheduler is currently unaware of frequency changes and the current
compute capacity offered by the cpus. This patch is not the solution. It
is a hack to give us something to experiment with for now. A proper
solution could be based on the frequency invariant load tracking proposed
in the past: https://lkml.org/lkml/2013/4/16/289

This patch should _not_ be considered safe.

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 drivers/cpufreq/cpufreq.c | 2 ++
 include/linux/sched.h     | 2 ++
 kernel/sched/fair.c       | 6 ++++++
 kernel/sched/sched.h      | 2 ++
 4 files changed, 12 insertions(+)
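For reference, a minimal standalone sketch (not kernel code, not part of the
patch) of the fixed-point scaling applied in __cpufreq_notify_transition()
above: the new frequency is mapped onto a 0..1024 range relative to
policy->max, so a CPU running at half of its maximum frequency stores a
capacity of 512, and init_cfs_rq() starts each cfs_rq at the full-capacity
value of 1024. The helper name and the frequency values below are
illustrative assumptions only.

/*
 * Standalone illustration of the capacity scale used by set_curr_capacity():
 * capacity = (curr_freq * 1024) / max_freq.
 */
#include <stdio.h>

static long freq_to_capacity(unsigned long curr_khz, unsigned long max_khz)
{
	/* mirrors (freqs->new * 1024) / policy->max in __cpufreq_notify_transition() */
	return (long)((curr_khz * 1024) / max_khz);
}

int main(void)
{
	unsigned long max_khz = 2000000;			/* hypothetical 2.0 GHz policy->max */
	unsigned long freqs[] = { 500000, 1000000, 2000000 };	/* hypothetical OPPs */
	unsigned int i;

	for (i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
		printf("%7lu kHz -> capacity %4ld\n",
		       freqs[i], freq_to_capacity(freqs[i], max_khz));

	return 0;	/* prints 256, 512 and 1024 */
}

Note that this patch only stores the value atomically in cfs_rq->curr_capacity;
how the scheduler would consume it (e.g. scaling load contributions by
capacity/1024) is left to the experiments the commit message refers to.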