@@ -9,6 +9,7 @@
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant

/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
@@ -27,6 +27,7 @@ void topology_scale_freq_tick(void);
/* Replace task scheduler's default frequency-invariant accounting */
#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant

/* Replace task scheduler's default cpu-invariant accounting */
#define arch_scale_cpu_capacity topology_get_cpu_scale
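Both hunks above make the same change twice, once in arch/arm/include/asm/topology.h and once in arch/arm64/include/asm/topology.h: they wire the scheduler's arch_scale_freq_invariant() hook to topology_scale_freq_invariant(). Without an arch-provided definition the scheduler falls back to a compile-time constant; a sketch of the fallback logic in kernel/sched/sched.h of this era (paraphrased, not quoted verbatim):

	#ifdef arch_scale_freq_capacity
	# ifndef arch_scale_freq_invariant
	#  define arch_scale_freq_invariant()	true
	# endif
	#else
	# define arch_scale_freq_invariant()	false
	#endif

Because both architectures already define arch_scale_freq_capacity, the scheduler would otherwise assume full frequency invariance even when neither cpufreq nor the AMU counters can deliver it; the new define replaces that blanket constant with a runtime check.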
@@ -246,6 +246,13 @@ static int __init init_amu_fie(void)
		static_branch_enable(&amu_fie_key);
	}

+	/*
+	 * If the system is not fully invariant after AMU init, disable
+	 * partial use of counters for frequency invariance.
+	 */
+	if (!topology_scale_freq_invariant())
+		static_branch_disable(&amu_fie_key);
+
free_valid_mask:
	free_cpumask_var(valid_cpus);
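This is the behavioural core of the patch for arm64: init_amu_fie() may have enabled amu_fie_key because the activity monitor (AMU) counters turned out to be usable on some subset of CPUs, but if the system as a whole is not frequency invariant afterwards (the counters do not cover every online CPU and cpufreq cannot fill the gap), partial use of the counters would feed the scheduler a mix of invariant and non-invariant utilization signals. Disabling the static key again keeps the accounting uniformly non-invariant instead.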
@@ -21,6 +21,12 @@
#include <linux/sched.h>
#include <linux/smp.h>

+bool topology_scale_freq_invariant(void)
+{
+	return cpufreq_supports_freq_invariance() ||
+	       arch_freq_counters_available(cpu_online_mask);
+}
+
__weak bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return false;
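topology_scale_freq_invariant() defines system-wide invariance as either of two complete sources: the cpufreq core reports that it performs frequency-invariance updates itself, or counter-based updates are available on every online CPU (on arm64, arch_freq_counters_available() overrides the __weak stub above and checks the mask against the AMU-capable CPUs). cpufreq_supports_freq_invariance() comes from the cpufreq side of this same series; from memory it is little more than a static-key read, along these lines (a sketch, not the exact upstream code):

	/* drivers/cpufreq/cpufreq.c (sketch) */
	static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);

	bool cpufreq_supports_freq_invariance(void)
	{
		return static_branch_likely(&cpufreq_freq_invariance);
	}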
@@ -30,6 +30,8 @@ static inline unsigned long topology_get_freq_scale(int cpu)
	return per_cpu(freq_scale, cpu);
}

+bool topology_scale_freq_invariant(void);
+
bool arch_freq_counters_available(const struct cpumask *cpus);

DECLARE_PER_CPU(unsigned long, thermal_pressure);
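The declaration lives in include/linux/arch_topology.h so the asm/topology.h defines in the first two hunks resolve for both architectures. To see what the hook actually gates, schedutil's frequency selection is the main consumer: invariant utilization is already scaled, so it maps against the maximum frequency, while non-invariant utilization maps against the current one. A simplified sketch modeled on kernel/sched/cpufreq_schedutil.c of this era (trimmed, not verbatim):

	static unsigned int get_next_freq(struct sugov_policy *sg_policy,
					  unsigned long util, unsigned long max)
	{
		struct cpufreq_policy *policy = sg_policy->policy;
		/* Invariant util is pre-scaled; map it against max_freq. */
		unsigned int freq = arch_scale_freq_invariant() ?
					policy->cpuinfo.max_freq : policy->cur;

		return map_util_freq(util, freq, max);
	}

With the runtime check in place, a platform that defines arch_scale_freq_capacity but cannot keep freq_scale up to date no longer takes the invariant branch by mistake.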