@@ -318,6 +318,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+ WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));

return 0;
}
@@ -339,6 +340,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);

if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -545,7 +547,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

- min_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

@@ -749,6 +751,40 @@ static void amd_pstate_init_prefcore(void)
schedule_work(&sched_prefcore_work);
}

+static void amd_pstate_update_highest_perf(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct amd_cpudata *cpudata;
+ u32 prev_high = 0, cur_high = 0;
+ u64 highest_perf;
+ int ret;
+
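+ /* Preferred core ranking is maintained only when prefcore is enabled. */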
+ if (!prefcore)
+ return;
+
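+ /* Read this CPU's current highest perf; the platform may update it at runtime. */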
+ ret = amd_pstate_get_highest_perf(cpu, &highest_perf);
+ if (ret)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
+
+ cpudata = policy->driver_data;
+ cur_high = highest_perf;
+ prev_high = READ_ONCE(cpudata->prefcore_ranking);
+
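+ /* On a ranking change, cache the new value and update the scheduler's ITMT core priority. */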
+ if (prev_high != cur_high) {
+ WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
+ sched_set_itmt_core_prio(cur_high, cpu);
+ }
+
+ cpufreq_cpu_put(policy);
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -913,7 +949,7 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
u32 perf;
struct amd_cpudata *cpudata = policy->driver_data;

- perf = READ_ONCE(cpudata->highest_perf);
+ perf = READ_ONCE(cpudata->prefcore_ranking);

return sysfs_emit(buf, "%u\n", perf);
}
@@ -1473,6 +1509,7 @@ static struct cpufreq_driver amd_pstate_driver = {
.suspend = amd_pstate_cpu_suspend,
.resume = amd_pstate_cpu_resume,
.set_boost = amd_pstate_set_boost,
+ .update_highest_perf = amd_pstate_update_highest_perf,
.name = "amd-pstate",
.attr = amd_pstate_attr,
};
@@ -1487,6 +1524,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.online = amd_pstate_epp_cpu_online,
.suspend = amd_pstate_epp_suspend,
.resume = amd_pstate_epp_resume,
+ .update_highest_perf = amd_pstate_update_highest_perf,
.name = "amd-pstate-epp",
.attr = amd_pstate_epp_attr,
};
@@ -39,11 +39,16 @@ struct amd_aperf_mperf {
* @cppc_req_cached: cached performance request hints
* @highest_perf: the maximum performance an individual processor may reach,
* assuming ideal conditions
+ * For platforms that do not support the preferred core feature, the
+ * highest_perf may be set to a fixed value such as 166 or 255 to avoid
+ * miscalculating the max frequency; we then use that fixed value as-is.
* @nominal_perf: the maximum sustained performance level of the processor,
* assuming ideal operating conditions
* @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
* savings are achieved
* @lowest_perf: the absolute lowest performance level of the processor
+ * @prefcore_ranking: the preferred core ranking; a higher value indicates a
+ * higher priority.
* @max_freq: the frequency that mapped to highest_perf
* @min_freq: the frequency that mapped to lowest_perf
* @nominal_freq: the frequency that mapped to nominal_perf
@@ -70,6 +75,7 @@ struct amd_cpudata {
u32 nominal_perf;
u32 lowest_nonlinear_perf;
u32 lowest_perf;
+ u32 prefcore_ranking;

u32 max_freq;
u32 min_freq;