@@ -115,7 +115,8 @@ struct amd_aperf_mperf {
* @cppc_cap1_cached: Cached value of the last CPPC Capabilities MSR
* @update_util: Cpufreq utility callback information
* @sample: the stored performance sample
-
+ * @suspended: Whether or not the driver has been suspended.
+ *
* The amd_cpudata is key private data for each CPU thread in AMD P-State, and
* represents all the attributes and goals that AMD P-State requests at runtime.
*/
@@ -155,6 +156,7 @@ struct amd_cpudata {
u64 cppc_cap1_cached;
struct update_util_data update_util;
struct amd_aperf_mperf sample;
+ bool suspended;
};
/**
@@ -215,6 +217,9 @@ static DEFINE_SPINLOCK(amd_pstate_cpu_lock);
static bool cppc_boost __read_mostly;
struct kobject *amd_pstate_kobj;
+/* Set while the amd-pstate driver is exiting, to skip offline work during teardown */
+static bool pstate_exiting;
+
#ifdef CONFIG_ACPI_CPPC_LIB
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
{
@@ -1377,6 +1382,96 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0;
}
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+ u64 value, max_perf;
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+		pr_err("failed to enable amd-pstate during resume, error %d\n", ret);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
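+	/*
+	 * MSR-based systems can replay the cached request register
+	 * directly; shared-memory systems rebuild the request through
+	 * the ACPI CPPC interface instead.
+	 */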
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+ pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+ if (epp) {
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+ struct cppc_perf_ctrls perf_ctrls;
+ int min_perf;
+ u64 value;
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
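+	/* Park the offline core at its lowest perf level */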
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(min_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.desired_perf = 0;
+ perf_ctrls.max_perf = min_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(AMD_CPPC_EPP_POWERSAVE);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+ mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = all_cpu_data[policy->cpu];
+
+ pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+	/* Nothing to do while suspended or when the driver is exiting */
+	if (cpudata->suspended || pstate_exiting)
+		return 0;
+
+ if (epp)
+ amd_pstate_epp_offline(policy);
+
+ return 0;
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
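+	/* Stop the utilization update callback before taking the core down */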
+ amd_pstate_clear_update_util_hook(policy->cpu);
+
+ return amd_pstate_cpu_offline(policy);
+}
+
static void amd_pstate_verify_cpu_policy(struct amd_cpudata *cpudata,
struct cpufreq_policy_data *policy)
{
@@ -1411,6 +1506,8 @@ static struct cpufreq_driver amd_pstate_epp_driver = {
.init = amd_pstate_epp_cpu_init,
.exit = amd_pstate_epp_cpu_exit,
.update_limits = amd_pstate_epp_update_limits,
+ .offline = amd_pstate_epp_cpu_offline,
+ .online = amd_pstate_epp_cpu_online,
.name = "amd_pstate_epp",
.attr = amd_pstate_epp_attr,
};
@@ -1480,6 +1577,7 @@ static int __init amd_pstate_init(void)
pr_err("amd-pstate: Sysfs attribute export failed with error %d.\n",
ret);
}
+ pstate_exiting = false;
return ret;
}
@@ -1494,6 +1592,7 @@ static void __exit amd_pstate_exit(void)
{
unsigned int cpu;
+ pstate_exiting = true;
cpufreq_unregister_driver(default_pstate_driver);
amd_pstate_enable(false);
The patch adds online and offline driver callback support to allow CPU
cores to go offline, and to restore the previous working state when a
core comes back online later in EPP driver mode.

Signed-off-by: Perry Yuan <Perry.Yuan@amd.com>
---
 drivers/cpufreq/amd-pstate.c | 101 ++++++++++++++++++++++++++++++++++-
 1 file changed, 100 insertions(+), 1 deletion(-)
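As a round-trip sanity check, this is roughly the sequence the new
callbacks are meant to survive. A minimal sketch, assuming only the
.online/.offline members added above; example_hotplug_round_trip() is a
hypothetical helper for illustration, not part of this patch or of the
cpufreq core:

#include <linux/cpufreq.h>

/*
 * Hypothetical helper: models how the cpufreq core drives the new
 * callbacks around a CPU hotplug cycle.
 */
static void example_hotplug_round_trip(struct cpufreq_driver *drv,
				       struct cpufreq_policy *policy)
{
	/* Going down: the core is parked at its lowest perf level */
	if (drv->offline)
		drv->offline(policy);	/* amd_pstate_epp_cpu_offline() */

	/* ... hotplug machinery removes and re-adds the CPU here ... */

	/* Coming back: the cached CPPC request is replayed */
	if (drv->online)
		drv->online(policy);	/* amd_pstate_epp_cpu_online() */
}

From user space the same cycle can be exercised with
"echo 0 > /sys/devices/system/cpu/cpuN/online" followed by "echo 1" to
the same file.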