@@ -6822,6 +6822,17 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
	return false;
}
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+	return sg->sgc->max_capacity * capacity_margin <
+				ref->sgc->max_capacity * 1024;
+}
+
static inline enum
group_type group_classify(struct sched_group *group,
			  struct sg_lb_stats *sgs)
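
To illustrate the comparison above, here is a minimal standalone sketch of
the margin check, assuming the ~20% capacity_margin (1280 against a scale of
1024) used elsewhere in fair.c; the numeric capacities and the userspace
harness are illustrative only, not taken from the patch.

#include <stdbool.h>
#include <stdio.h>

/* assumed to mirror the ~20% margin in kernel/sched/fair.c */
static const unsigned long capacity_margin = 1280;

/* sg is "smaller" if its capacity is more than the margin below ref's */
static bool group_smaller_cpu_capacity(unsigned long sg_cap, unsigned long ref_cap)
{
	return sg_cap * capacity_margin < ref_cap * 1024;
}

int main(void)
{
	/* LITTLE (430) vs big (1024): 430 * 1280 = 550400 < 1048576 -> true */
	printf("%d\n", group_smaller_cpu_capacity(430, 1024));
	/* big vs LITTLE: 1310720 < 440320 -> false */
	printf("%d\n", group_smaller_cpu_capacity(1024, 430));
	/* 1000 vs 1024 stays within the ~20% margin: 1280000 < 1048576 -> false */
	printf("%d\n", group_smaller_cpu_capacity(1000, 1024));
	return 0;
}
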
@@ -6925,6 +6936,19 @@ static bool update_sd_pick_busiest(struct lb_env *env,
	if (sgs->avg_load <= busiest->avg_load)
		return false;
+	if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
+		goto asym_packing;
+
+	/* Candidate sg has no more than one task per cpu and has
+	 * higher per-cpu capacity. Migrating tasks to less capable
+	 * cpus may harm throughput. Maximize throughput;
+	 * power/energy consequences are not considered.
+	 */
+	if (sgs->sum_nr_running <= sgs->group_weight &&
+	    group_smaller_cpu_capacity(sds->local, sg))
+		return false;
+
+asym_packing:
	/* This is the busiest node in its class. */
	if (!(env->sd->flags & SD_ASYM_PACKING))
		return true;
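
For reference, a simplified userspace sketch of the new bail-out path in
update_sd_pick_busiest(); the toy_group struct, the flag value, the helper
name may_pick_busiest() and the numbers are illustrative stand-ins under the
same assumed ~20% margin, not the real sg_lb_stats/sched_group_capacity
definitions.

#include <stdbool.h>
#include <stdio.h>

#define SD_ASYM_CPUCAPACITY	0x0040		/* illustrative flag value */

static const unsigned long capacity_margin = 1280;	/* assumed ~20% margin */

struct toy_group {
	unsigned long max_capacity;	/* highest per-cpu capacity in the group */
	unsigned int sum_nr_running;	/* runnable tasks in the group */
	unsigned int group_weight;	/* number of cpus in the group */
};

static bool group_smaller_cpu_capacity(struct toy_group *sg, struct toy_group *ref)
{
	return sg->max_capacity * capacity_margin < ref->max_capacity * 1024;
}

/* Returns false if the new check rejects sg as busiest, true otherwise */
static bool may_pick_busiest(int sd_flags, struct toy_group *local, struct toy_group *sg)
{
	if (!(sd_flags & SD_ASYM_CPUCAPACITY))
		return true;	/* fall through to the existing checks */

	/* at most one task per cpu and more capacity than local: don't pull */
	if (sg->sum_nr_running <= sg->group_weight &&
	    group_smaller_cpu_capacity(local, sg))
		return false;

	return true;
}

int main(void)
{
	struct toy_group little = { .max_capacity = 430,  .sum_nr_running = 1, .group_weight = 4 };
	struct toy_group big    = { .max_capacity = 1024, .sum_nr_running = 2, .group_weight = 2 };

	/* big group has one task per cpu and higher capacity: rejected */
	printf("%d\n", may_pick_busiest(SD_ASYM_CPUCAPACITY, &little, &big));

	/* once the big group is overloaded (3 tasks on 2 cpus) it may be picked */
	big.sum_nr_running = 3;
	printf("%d\n", may_pick_busiest(SD_ASYM_CPUCAPACITY, &little, &big));
	return 0;
}
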
For asymmetric cpu capacity systems it is counter-productive for
throughput if low capacity cpus are pulling tasks from non-overloaded
cpus with higher capacity. The assumption is that higher cpu capacity
is preferred over running alone in a group with lower cpu capacity.
This patch rejects higher cpu capacity groups with one or fewer tasks
per cpu as the potential busiest group, which could otherwise lead to
a series of failing load-balancing attempts culminating in a forced
migration.

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 kernel/sched/fair.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

--
1.9.1