@@ -546,6 +546,10 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
idx = cpufreq_frequency_table_target(policy, target_freq,
CPUFREQ_RELATION_L);
+
+ /* Replace the target with an efficient one */
+ idx = cpufreq_frequency_find_efficient(policy, idx);
+
policy->cached_resolved_idx = idx;
return policy->freq_table[idx].frequency;
}
@@ -994,6 +994,17 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
}
+static inline unsigned int
+cpufreq_frequency_find_efficient(struct cpufreq_policy *policy,
+ unsigned int idx)
+{
+ struct cpufreq_frequency_table *table = policy->freq_table;
+ unsigned int efficient_idx = table[idx].efficient; /* efficient alternative for this entry — field added elsewhere in this series */
+
+ /* Use the efficient entry only if its frequency still honours policy->max */
+ return table[efficient_idx].frequency <= policy->max ? efficient_idx :
+ idx;
+}
+
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pos;
Avoid using frequencies marked as inefficient. This change affects schedutil, which is the only in-tree governor using the function cpufreq_driver_resolve_freq(). Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>