Message ID | 20231129065437.290183-6-li.meng@amd.com
---|---
State | Superseded
Series | [V11,1/7] x86: Drop CPU_SUP_INTEL from SCHED_MC_PRIO for the expansion.
[AMD Official Use Only - General]

> -----Original Message-----
> From: Meng, Li (Jassmine) <Li.Meng@amd.com>
> Sent: Wednesday, November 29, 2023 2:55 PM
> To: Rafael J . Wysocki <rafael.j.wysocki@intel.com>; Huang, Ray <Ray.Huang@amd.com>
> Cc: linux-pm@vger.kernel.org; linux-kernel@vger.kernel.org; x86@kernel.org;
> linux-acpi@vger.kernel.org; Shuah Khan <skhan@linuxfoundation.org>;
> linux-kselftest@vger.kernel.org; Fontenot, Nathan <Nathan.Fontenot@amd.com>;
> Sharma, Deepak <Deepak.Sharma@amd.com>; Deucher, Alexander <Alexander.Deucher@amd.com>;
> Limonciello, Mario <Mario.Limonciello@amd.com>; Huang, Shimmer <Shimmer.Huang@amd.com>;
> Yuan, Perry <Perry.Yuan@amd.com>; Du, Xiaojian <Xiaojian.Du@amd.com>;
> Viresh Kumar <viresh.kumar@linaro.org>; Borislav Petkov <bp@alien8.de>;
> Oleksandr Natalenko <oleksandr@natalenko.name>; Meng, Li (Jassmine) <Li.Meng@amd.com>;
> Karny, Wyes <Wyes.Karny@amd.com>
> Subject: [PATCH V11 5/7] cpufreq: amd-pstate: Update amd-pstate preferred core ranking dynamically
>
> Preferred core rankings can be changed dynamically by the platform based on
> the workload and platform conditions and accounting for thermals and aging.
> When this occurs, cpu priority need to be set.
>
> Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
> Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
> Reviewed-by: Wyes Karny <wyes.karny@amd.com>
> Reviewed-by: Huang Rui <ray.huang@amd.com>
> Signed-off-by: Meng Li <li.meng@amd.com>
> ---
>  drivers/cpufreq/amd-pstate.c | 46 ++++++++++++++++++++++++++++++++++++
>  include/linux/amd-pstate.h   |  6 +++++
>  2 files changed, 52 insertions(+)
>
> [patch body snipped; identical to the diff shown below]
> --
> 2.34.1

Reviewed-by: Perry Yuan <perry.yuan@amd.com>
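For context, the `.update_highest_perf` callback wired up by this patch is invoked from the cpufreq core, which is extended by an earlier patch in this series (not shown on this page). A minimal sketch of what that core-side dispatch could look like follows; the helper name and its placement are assumptions for illustration, not the series' actual code:

```c
/*
 * Sketch only: the real core-side plumbing is introduced elsewhere in this
 * series, so the function name below is assumed, not the actual API.
 * Conceptually it lives in drivers/cpufreq/cpufreq.c, where cpufreq_driver
 * is the core's pointer to the registered driver.
 */
void cpufreq_update_highest_perf(unsigned int cpu)
{
	/* Forward the per-CPU notification to the driver, if it cares. */
	if (cpufreq_driver && cpufreq_driver->update_highest_perf)
		cpufreq_driver->update_highest_perf(cpu);
}
```

On AMD platforms the trigger is expected to be a platform notification that the highest-performance value changed, at which point the driver callback in the diff below re-reads the ranking and refreshes the scheduler's ITMT priority.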
```diff
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 74dcf63d75f9..88df6510dcc0 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -312,6 +312,7 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
+	WRITE_ONCE(cpudata->prefcore_ranking, AMD_CPPC_HIGHEST_PERF(cap1));
 
 	return 0;
 }
@@ -333,6 +334,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, cppc_perf.lowest_nonlinear_perf);
 	WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+	WRITE_ONCE(cpudata->prefcore_ranking, cppc_perf.highest_perf);
 
 	if (cppc_state == AMD_PSTATE_ACTIVE)
 		return 0;
@@ -749,6 +751,34 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
 	schedule_work(&sched_prefcore_work);
 }
 
+static void amd_pstate_update_highest_perf(unsigned int cpu)
+{
+	struct cpufreq_policy *policy;
+	struct amd_cpudata *cpudata;
+	u32 prev_high = 0, cur_high = 0;
+	int ret;
+
+	if ((!amd_pstate_prefcore) || (!cpudata->hw_prefcore))
+		return;
+
+	ret = amd_pstate_get_highest_perf(cpu, &cur_high);
+	if (ret)
+		return;
+
+	policy = cpufreq_cpu_get(cpu);
+	cpudata = policy->driver_data;
+	prev_high = READ_ONCE(cpudata->prefcore_ranking);
+
+	if (prev_high != cur_high) {
+		WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
+
+		if (cur_high < CPPC_MAX_PERF)
+			sched_set_itmt_core_prio((int)cur_high, cpu);
+	}
+
+	cpufreq_cpu_put(policy);
+}
+
 static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
 {
 	int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -920,6 +950,17 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
 	return sysfs_emit(buf, "%u\n", perf);
 }
 
+static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
+						char *buf)
+{
+	u32 perf;
+	struct amd_cpudata *cpudata = policy->driver_data;
+
+	perf = READ_ONCE(cpudata->prefcore_ranking);
+
+	return sysfs_emit(buf, "%u\n", perf);
+}
+
 static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
 					   char *buf)
 {
@@ -1133,6 +1174,7 @@ cpufreq_freq_attr_ro(amd_pstate_max_freq);
 cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
 
 cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_ro(amd_pstate_prefcore_ranking);
 cpufreq_freq_attr_ro(amd_pstate_hw_prefcore);
 cpufreq_freq_attr_rw(energy_performance_preference);
 cpufreq_freq_attr_ro(energy_performance_available_preferences);
@@ -1143,6 +1185,7 @@ static struct freq_attr *amd_pstate_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_prefcore_ranking,
 	&amd_pstate_hw_prefcore,
 	NULL,
 };
@@ -1151,6 +1194,7 @@ static struct freq_attr *amd_pstate_epp_attr[] = {
 	&amd_pstate_max_freq,
 	&amd_pstate_lowest_nonlinear_freq,
 	&amd_pstate_highest_perf,
+	&amd_pstate_prefcore_ranking,
 	&amd_pstate_hw_prefcore,
 	&energy_performance_preference,
 	&energy_performance_available_preferences,
@@ -1491,6 +1535,7 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.suspend	= amd_pstate_cpu_suspend,
 	.resume		= amd_pstate_cpu_resume,
 	.set_boost	= amd_pstate_set_boost,
+	.update_highest_perf	= amd_pstate_update_highest_perf,
 	.name		= "amd-pstate",
 	.attr		= amd_pstate_attr,
 };
@@ -1505,6 +1550,7 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.online		= amd_pstate_epp_cpu_online,
 	.suspend	= amd_pstate_epp_suspend,
 	.resume		= amd_pstate_epp_resume,
+	.update_highest_perf	= amd_pstate_update_highest_perf,
 	.name		= "amd-pstate-epp",
 	.attr		= amd_pstate_epp_attr,
 };
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index 87e140e9e6db..426822612373 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -39,11 +39,16 @@ struct amd_aperf_mperf {
  * @cppc_req_cached: cached performance request hints
  * @highest_perf: the maximum performance an individual processor may reach,
  *		  assuming ideal conditions
+ *		  For platforms that do not support the preferred core feature, the
+ *		  highest_pef may be configured with 166 or 255, to avoid max frequency
+ *		  calculated wrongly. we take the fixed value as the highest_perf.
  * @nominal_perf: the maximum sustained performance level of the processor,
  *		  assuming ideal operating conditions
  * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
  *			   savings are achieved
  * @lowest_perf: the absolute lowest performance level of the processor
+ * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher
+ *		      priority.
  * @max_freq: the frequency that mapped to highest_perf
  * @min_freq: the frequency that mapped to lowest_perf
  * @nominal_freq: the frequency that mapped to nominal_perf
@@ -73,6 +78,7 @@ struct amd_cpudata {
 	u32 nominal_perf;
 	u32 lowest_nonlinear_perf;
 	u32 lowest_perf;
+	u32 prefcore_ranking;
 
 	u32 max_freq;
 	u32 min_freq;
```
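A detail worth flagging in amd_pstate_update_highest_perf() above: `cpudata->hw_prefcore` is read before `cpudata` has been assigned from `policy->driver_data`, and the return value of `cpufreq_cpu_get()` is never checked. The sketch below shows one safer ordering, reusing only the helpers and fields the patch itself introduces; it is an illustration of the concern, not the submitted code:

```c
/*
 * Sketch of a safer ordering for the callback above: obtain the policy and
 * cpudata before dereferencing cpudata->hw_prefcore, and bail out cleanly
 * if the policy is unavailable.  Helpers and fields are those used by the
 * patch itself.
 */
static void amd_pstate_update_highest_perf(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct amd_cpudata *cpudata;
	u32 prev_high, cur_high;

	if (!amd_pstate_prefcore)
		return;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	cpudata = policy->driver_data;
	if (!cpudata->hw_prefcore)
		goto out;

	if (amd_pstate_get_highest_perf(cpu, &cur_high))
		goto out;

	prev_high = READ_ONCE(cpudata->prefcore_ranking);
	if (prev_high != cur_high) {
		WRITE_ONCE(cpudata->prefcore_ranking, cur_high);

		/* keep the same CPPC_MAX_PERF guard as the patch before touching ITMT */
		if (cur_high < CPPC_MAX_PERF)
			sched_set_itmt_core_prio((int)cur_high, cpu);
	}

out:
	cpufreq_cpu_put(policy);
}
```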