@@ -4644,7 +4644,9 @@ static int select_idle_sibling(struct task_struct *p, int target)
struct sched_domain *sd;
struct sched_group *sg;
int i = task_cpu(p);
+ int target_energy;
+#ifndef CONFIG_SCHED_ENERGY
if (idle_cpu(target))
return target;
@@ -4653,6 +4655,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
*/
if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
return i;
+#endif
+ target_energy = energy_diff_task(target, p);
/*
* Otherwise, iterate the domains and find an elegible idle cpu.
@@ -4666,8 +4670,12 @@ static int select_idle_sibling(struct task_struct *p, int target)
goto next;
for_each_cpu(i, sched_group_cpus(sg)) {
+ int diff;
if (i == target || !idle_cpu(i))
goto next;
+ diff = energy_diff_task(i, p);
+ if (diff > target_energy)
+ goto next;
}
target = cpumask_first_and(sched_group_cpus(sg),
Make select_idle_sibling() consider energy when picking an idle cpu.
Only idle cpus are still considered. A more aggressive energy-conserving
approach could go further and consider partly utilized cpus.

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 kernel/sched/fair.c | 8 ++++++++
 1 file changed, 8 insertions(+)