| Message ID | 20221219064042.661122-8-perry.yuan@amd.com |
|---|---|
| State | Superseded |
| Series | [v8,01/13] ACPI: CPPC: Add AMD pstate energy performance preference cppc control |
On Mon, Dec 19, 2022 at 02:40:36PM +0800, Yuan, Perry wrote:
> From: Perry Yuan <Perry.Yuan@amd.com>
>
> Add online and offline driver callback support to allow CPU cores to
> go offline, and to restore the previous working state when a core
> comes back online later, in EPP driver mode.
>
> Reviewed-by: Mario Limonciello <Mario.Limonciello@amd.com>
> Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>

Acked-by: Huang Rui <ray.huang@amd.com>

> ---
>  drivers/cpufreq/amd-pstate.c | 82 ++++++++++++++++++++++++++++++++++++
>  include/linux/amd-pstate.h   |  1 +
>  2 files changed, 83 insertions(+)
>
> diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
> index 66b39457a312..cb647f55a169 100644
> --- a/drivers/cpufreq/amd-pstate.c
> +++ b/drivers/cpufreq/amd-pstate.c
> @@ -1020,6 +1020,86 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
>  	return 0;
>  }
>
> +static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
> +{
> +	struct cppc_perf_ctrls perf_ctrls;
> +	u64 value, max_perf;
> +	int ret;
> +
> +	ret = amd_pstate_enable(true);
> +	if (ret)
> +		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
> +
> +	value = READ_ONCE(cpudata->cppc_req_cached);
> +	max_perf = READ_ONCE(cpudata->highest_perf);
> +
> +	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> +	} else {
> +		perf_ctrls.max_perf = max_perf;
> +		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
> +		cppc_set_perf(cpudata->cpu, &perf_ctrls);
> +	}
> +}
> +
> +static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
> +{
> +	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> +
> +	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
> +
> +	if (cppc_state == AMD_PSTATE_ACTIVE) {
> +		amd_pstate_epp_reenable(cpudata);
> +		cpudata->suspended = false;
> +	}
> +
> +	return 0;
> +}
> +
> +static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
> +{
> +	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> +	struct cppc_perf_ctrls perf_ctrls;
> +	int min_perf;
> +	u64 value;
> +
> +	min_perf = READ_ONCE(cpudata->lowest_perf);
> +	value = READ_ONCE(cpudata->cppc_req_cached);
> +
> +	mutex_lock(&amd_pstate_limits_lock);
> +	if (boot_cpu_has(X86_FEATURE_CPPC)) {
> +		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
> +
> +		/* Set max perf same as min perf */
> +		value &= ~AMD_CPPC_MAX_PERF(~0L);
> +		value |= AMD_CPPC_MAX_PERF(min_perf);
> +		value &= ~AMD_CPPC_MIN_PERF(~0L);
> +		value |= AMD_CPPC_MIN_PERF(min_perf);
> +		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
> +	} else {
> +		perf_ctrls.desired_perf = 0;
> +		perf_ctrls.max_perf = min_perf;
> +		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
> +		cppc_set_perf(cpudata->cpu, &perf_ctrls);
> +	}
> +	mutex_unlock(&amd_pstate_limits_lock);
> +}
> +
> +static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
> +{
> +	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
> +
> +	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
> +
> +	if (cpudata->suspended)
> +		return 0;
> +
> +	if (cppc_state == AMD_PSTATE_ACTIVE)
> +		amd_pstate_epp_offline(policy);
> +
> +	return 0;
> +}
> +
>  static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
>  {
>  	cpufreq_verify_within_cpu_limits(policy);
> @@ -1047,6 +1127,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
>  	.init		= amd_pstate_epp_cpu_init,
>  	.exit		= amd_pstate_epp_cpu_exit,
>  	.update_limits	= amd_pstate_epp_update_limits,
> +	.offline	= amd_pstate_epp_cpu_offline,
> +	.online		= amd_pstate_epp_cpu_online,
>  	.name		= "amd_pstate_epp",
>  	.attr		= amd_pstate_epp_attr,
> };
> diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
> index fe1aef743c09..1424b09ef543 100644
> --- a/include/linux/amd-pstate.h
> +++ b/include/linux/amd-pstate.h
> @@ -82,6 +82,7 @@ struct amd_cpudata {
>  	s16	epp_cached;
>  	u32	policy;
>  	u64	cppc_cap1_cached;
> +	bool	suspended;
>  };
>
>  /**
> --
> 2.34.1
>
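For readers skimming the quoted hunks: the offline path does its work by rewriting two 8-bit fields packed into the cached MSR_AMD_CPPC_REQ value, pulling max perf down to lowest_perf so the core sits at its floor until it comes back. Below is a minimal, standalone sketch of just that clamping step, assuming the AMD_CPPC_MAX_PERF()/AMD_CPPC_MIN_PERF() field layout from arch/x86/include/asm/msr-index.h; the sample values are hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

/* Field layout of MSR_AMD_CPPC_REQ as used by amd-pstate: max perf in
 * bits 7:0, min perf in bits 15:8 (mirrors msr-index.h). */
#define AMD_CPPC_MAX_PERF(x)	(((x) & 0xffULL) << 0)
#define AMD_CPPC_MIN_PERF(x)	(((x) & 0xffULL) << 8)

int main(void)
{
	uint64_t value = 0x00c80aff;	/* hypothetical cppc_req_cached */
	uint64_t min_perf = 0x0a;	/* hypothetical lowest_perf */

	/* Clear each field first (the macro applied to ~0 builds the mask),
	 * then OR in the floor value, as the patch does; the desired-perf
	 * and EPP fields in the upper bits are left untouched. */
	value &= ~AMD_CPPC_MAX_PERF(~0ULL);
	value |= AMD_CPPC_MAX_PERF(min_perf);
	value &= ~AMD_CPPC_MIN_PERF(~0ULL);
	value |= AMD_CPPC_MIN_PERF(min_perf);

	printf("clamped request: %#llx\n", (unsigned long long)value);
	return 0;
}
```

Writing the same value into both fields is what the patch's "Set max perf same as min perf" comment refers to: the hardware is left no headroom above the floor.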
```diff
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 66b39457a312..cb647f55a169 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -1020,6 +1020,86 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+	struct cppc_perf_ctrls perf_ctrls;
+	u64 value, max_perf;
+	int ret;
+
+	ret = amd_pstate_enable(true);
+	if (ret)
+		pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+	value = READ_ONCE(cpudata->cppc_req_cached);
+	max_perf = READ_ONCE(cpudata->highest_perf);
+
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.max_perf = max_perf;
+		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+	}
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+	pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+	if (cppc_state == AMD_PSTATE_ACTIVE) {
+		amd_pstate_epp_reenable(cpudata);
+		cpudata->suspended = false;
+	}
+
+	return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+	struct cppc_perf_ctrls perf_ctrls;
+	int min_perf;
+	u64 value;
+
+	min_perf = READ_ONCE(cpudata->lowest_perf);
+	value = READ_ONCE(cpudata->cppc_req_cached);
+
+	mutex_lock(&amd_pstate_limits_lock);
+	if (boot_cpu_has(X86_FEATURE_CPPC)) {
+		cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+		/* Set max perf same as min perf */
+		value &= ~AMD_CPPC_MAX_PERF(~0L);
+		value |= AMD_CPPC_MAX_PERF(min_perf);
+		value &= ~AMD_CPPC_MIN_PERF(~0L);
+		value |= AMD_CPPC_MIN_PERF(min_perf);
+		wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+	} else {
+		perf_ctrls.desired_perf = 0;
+		perf_ctrls.max_perf = min_perf;
+		perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+		cppc_set_perf(cpudata->cpu, &perf_ctrls);
+	}
+	mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+	struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+	pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+	if (cpudata->suspended)
+		return 0;
+
+	if (cppc_state == AMD_PSTATE_ACTIVE)
+		amd_pstate_epp_offline(policy);
+
+	return 0;
+}
+
 static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
@@ -1047,6 +1127,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
 	.init		= amd_pstate_epp_cpu_init,
 	.exit		= amd_pstate_epp_cpu_exit,
 	.update_limits	= amd_pstate_epp_update_limits,
+	.offline	= amd_pstate_epp_cpu_offline,
+	.online		= amd_pstate_epp_cpu_online,
 	.name		= "amd_pstate_epp",
 	.attr		= amd_pstate_epp_attr,
 };
diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h
index fe1aef743c09..1424b09ef543 100644
--- a/include/linux/amd-pstate.h
+++ b/include/linux/amd-pstate.h
@@ -82,6 +82,7 @@ struct amd_cpudata {
 	s16	epp_cached;
 	u32	policy;
 	u64	cppc_cap1_cached;
+	bool	suspended;
 };
 
 /**
```
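On parts without X86_FEATURE_CPPC the driver cannot write the MSR, so both the re-enable and offline paths fall back to the ACPI CPPC interface via cppc_set_perf(). Below is a rough standalone sketch of the offline fallback branch under stated assumptions: cppc_set_perf() is stubbed out, the struct is a local stand-in for struct cppc_perf_ctrls from include/acpi/cppc_acpi.h (its energy_perf member is the one added in patch 01/13 of this series), and all values are hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for struct cppc_perf_ctrls (include/acpi/cppc_acpi.h). */
struct cppc_perf_ctrls {
	uint32_t max_perf;
	uint32_t min_perf;
	uint32_t desired_perf;
	uint32_t energy_perf;
};

/* Stub: the real cppc_set_perf() writes these values out through the
 * ACPI _CPC registers (shared memory / PCC) instead of an MSR. */
static int cppc_set_perf(int cpu, struct cppc_perf_ctrls *ctrls)
{
	printf("cpu%d: max=%u desired=%u energy_perf=%#x\n",
	       cpu, ctrls->max_perf, ctrls->desired_perf, ctrls->energy_perf);
	return 0;
}

int main(void)
{
	struct cppc_perf_ctrls perf_ctrls = { 0 };
	uint32_t lowest_perf = 10;	/* hypothetical lowest_perf */

	/* Mirror the patch's offline fallback: no explicit desired perf,
	 * cap max perf at the floor, and request a power-leaning EPP
	 * (0xc0 is HWP_EPP_BALANCE_POWERSAVE, shifted into bits 31:24
	 * the same way AMD_CPPC_ENERGY_PERF_PREF() does). */
	perf_ctrls.desired_perf = 0;
	perf_ctrls.max_perf = lowest_perf;
	perf_ctrls.energy_perf = (uint32_t)0xc0 << 24;
	return cppc_set_perf(0, &perf_ctrls);
}
```

The new suspended flag in struct amd_cpudata ties the two callbacks together: amd_pstate_epp_cpu_offline() returns early when it is set, presumably because the suspend path has already quiesced the core, and amd_pstate_epp_cpu_online() clears it after re-enabling the CPPC request.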