--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -325,6 +325,8 @@ endmenu
menu "CPU Power Management"
+source "drivers/cpufreq/Kconfig"
+
source "drivers/cpuidle/Kconfig"
endmenu
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -222,13 +222,22 @@ config GENERIC_CPUFREQ_CPU0
If in doubt, say N.
+config CPPC_CPUFREQ
+ bool "CPPC CPUFreq driver"
+ depends on ACPI && ACPI_PCC
+ default n
+ help
+ CPPC is Collaborative Processor Performance Control. It allows the OS
+ to request CPU performance in an abstract manner and lets the platform
+ (e.g. BMC) interpret it in a way that is specific to that platform.
+
menu "x86 CPU frequency scaling drivers"
depends on X86
source "drivers/cpufreq/Kconfig.x86"
endmenu
menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
source "drivers/cpufreq/Kconfig.arm"
endmenu
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
+obj-$(CONFIG_CPPC_CPUFREQ) += cppc-cpufreq.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
new file mode 100644
--- /dev/null
+++ b/drivers/cpufreq/cppc-cpufreq.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <acpi/actbl.h>
+
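+/*
+ * A _CPC package describes at most MAX_CPPC_PCC_ENT registers; CPPC_EN is
+ * the value written to the Enable register to turn CPPC on for a CPU.
+ */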
+#define MAX_CPPC_PCC_ENT 15
+#define CPPC_EN 1
+
+#define CMD_COMPLETE 1
+
+/* PCC Commands used by CPPC */
+enum cppc_ppc_cmds {
+ CMD_READ,
+ CMD_WRITE,
+ RESERVED,
+};
+
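+/* Index of the PCC subspace (described by the PCCT) used for CPPC messages. */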
+#define PCC_SUBSPACE_IDX 2
+
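+/*
+ * Per-CPU copy of the information in a CPU's _CPC package: the number of
+ * entries, the _CPC revision, and a generic address structure describing
+ * where each CPPC register lives in the PCC shared memory region.
+ */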
+struct cpc_desc {
+ unsigned int num_entries;
+ unsigned int version;
+ struct acpi_generic_address pcc_regs[MAX_CPPC_PCC_ENT];
+};
+
+/* These are indexes into the per-cpu pcc_regs[] */
+enum cppc_pcc_regs {
+ HIGHEST_PERF, /* Highest Performance */
+ NOMINAL_PERF, /* Nominal Performance */
+ LOW_NON_LINEAR_PERF, /* Lowest Nonlinear Performance */
+ LOWEST_PERF, /* Lowest Performance */
+ GUARANTEED_PERF, /* Guaranteed Performance Register */
+ DESIRED_PERF, /* Desired Performance Register */
+ MIN_PERF, /* Minimum Performance Register */
+ MAX_PERF, /* Maximum Performance Register */
+ PERF_REDUC_TOLERANCE, /* Performance Reduction Tolerance Register */
+ TIME_WINDOW, /* Time Window Register */
+ CTR_WRAP_TIME, /* Counter Wraparound Time */
+ NOMINAL_CTR, /* Nominal Counter Register */
+ DELIVERED_CTR, /* Delivered Counter Register */
+ PERF_LIMITED, /* Performance Limited Register */
+ ENABLE /* Enable Register */
+};
+
+static struct cpc_desc __percpu *cpc_desc;
+
+/* PCC Shared COMM region base address for this client */
+static u64 pcc_comm_base_addr; /* Returned by the Subspace structure */
+static void __iomem *comm_base_addr; /* For use after ioremap */
+
+extern int get_pcc_comm_channel(u32 ss_idx, u64* addr, int *len);
+extern u16 send_pcc_cmd(u8 cmd, u8 sci, u32 ss_idx, u64 * __iomem base_addr);
+
+static u64 past_delivered;
+static u64 past_nominal;
+
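+/*
+ * Report the performance delivered to a CPU: issue a PCC READ command, then
+ * read the delivered and nominal feedback counters and scale the nominal
+ * counter value by the ratio of the two counter deltas.
+ */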
+static unsigned int cppc_get_freq(unsigned int cpu)
+{
+ struct cpc_desc *current_cpu_cpc = per_cpu_ptr(cpc_desc, cpu);
+ u64 curr_delivered, curr_nominal, curr_perf;
+ u16 status;
+
+ status = send_pcc_cmd(CMD_READ, 0, PCC_SUBSPACE_IDX, comm_base_addr);
+ if (status & CMD_COMPLETE) {
+ acpi_read(&curr_delivered, &current_cpu_cpc->pcc_regs[DELIVERED_CTR]);
+ acpi_read(&curr_nominal, &current_cpu_cpc->pcc_regs[NOMINAL_CTR]);
+
+ /* XXX: Check for overflow regs. */
+ curr_perf = curr_nominal * (curr_delivered - past_delivered) /
+ (curr_nominal - past_nominal);
+ } else {
+ pr_err("Failed to get Delivered Perf for CPU:%d\n", cpu);
+ return -EINVAL;
+ }
+
+ return curr_perf;
+}
+
+/* For each CPU, get its _CPC table and extract its Perf thresholds. */
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *out_obj, *pcc_obj;
+ struct cpc_desc *current_cpu_cpc = per_cpu_ptr(cpc_desc, cpu);
+ struct acpi_generic_address *gas_t;
+ char proc_name[11];
+ unsigned int num_ent, i;
+ int ret = 0;
+ acpi_handle handle;
+ acpi_status status;
+ u16 pcc_status;
+
+ /* Search for this CPU's _CPC and populate its info. */
+ snprintf(proc_name, sizeof(proc_name), "\\_SB.CPU%d", cpu);
+
+ status = acpi_get_handle(NULL, proc_name, &handle);
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ if (!acpi_has_method(handle, "_CPC")) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ status = acpi_evaluate_object(handle, "_CPC", NULL, &output);
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ out_obj = (union acpi_object *) output.pointer;
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ num_ent = out_obj->package.count;
+ if (num_ent > MAX_CPPC_PCC_ENT) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ current_cpu_cpc->num_entries = num_ent;
+
+ /* Iterate through each entry in _CPC */
+ for (i = 0; i < num_ent; i++) {
+ pcc_obj = &out_obj->package.elements[i];
+
+ if (pcc_obj->type != ACPI_TYPE_BUFFER) {
+ pr_err("Malformed PCC entry in CPC table\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gas_t = (struct acpi_generic_address *)pcc_obj->buffer.pointer;
+
+ /* Get PCC parameters for each CPPC register. */
+ current_cpu_cpc->pcc_regs[i] = (struct acpi_generic_address) {
+ .space_id = gas_t->space_id,
+ .bit_width = gas_t->bit_width,
+ .bit_offset = gas_t->bit_offset,
+ .access_width = gas_t->access_width,
+ /* PCC communication space begins 8 bytes after PCCT shared mem header */
+ .address = (u64) (comm_base_addr + 8 + (u64) gas_t->address),
+ };
+ }
+
+ /* Get the MAX and MIN Thresholds for this CPU. */
+ pcc_status = send_pcc_cmd(CMD_READ, 0, PCC_SUBSPACE_IDX, comm_base_addr);
+ if (pcc_status & CMD_COMPLETE) {
+ u64 max, min;
+ /* XXX: policy needs to be modified to take in all 64 bits. */
+ acpi_read(&max, &current_cpu_cpc->pcc_regs[HIGHEST_PERF]);
+ policy->max = (u32) max;
+ acpi_read(&min, &current_cpu_cpc->pcc_regs[LOWEST_PERF]);
+ policy->min = (u32) min;
+ } else {
+ ret = -ENODEV;
+ pr_err("Failed to get CPPC parameters for CPU:%d\n", cpu);
+ goto out_free;
+ }
+
+ /* XXX: Is policy->related_cpus filled up via _PSD info? */
+
+ /* XXX: Populate this CPUs freq table data.*/
+ /* Enable CPPC on this CPU */
+ acpi_write(CPPC_EN, &current_cpu_cpc->pcc_regs[ENABLE]);
+ pcc_status = send_pcc_cmd(CMD_WRITE, 0, PCC_SUBSPACE_IDX, comm_base_addr);
+ if (!(pcc_status & CMD_COMPLETE)) {
+ ret = -EINVAL;
+ pr_debug("Failed to init CPPC on CPU:%d\n", cpu);
+ }
+
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
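+/* Keep any requested policy within the frequency limits known for this CPU. */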
+static int cppc_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ return 0;
+}
+
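+/*
+ * Request a new performance level: write the policy min/max and the desired
+ * performance into the PCC shared region, then issue a PCC WRITE command so
+ * the platform applies the request.
+ */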
+static int cppc_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpc_desc *current_cpu_cpc = per_cpu_ptr(cpc_desc, cpu);
+ struct cpufreq_freqs freqs;
+ u16 status;
+
+ freqs.old = policy->cur;
+ freqs.new = target_freq;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ /* Set CPU Perf thresholds and current desired perf value. */
+ acpi_write(policy->max, &current_cpu_cpc->pcc_regs[MAX_PERF]);
+
+ acpi_write(policy->min, &current_cpu_cpc->pcc_regs[MIN_PERF]);
+
+ acpi_write(target_freq, &current_cpu_cpc->pcc_regs[DESIRED_PERF]);
+
+ status = send_pcc_cmd(CMD_WRITE, 0, PCC_SUBSPACE_IDX, comm_base_addr);
+ if (!(status & CMD_COMPLETE)) {
+ pr_debug("Failed to set target CPU perf for CPU:%d, status:%d\n",
+ cpu, status);
+ return -EINVAL;
+ }
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int cppc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ iounmap(comm_base_addr);
+ free_percpu(cpc_desc);
+ return 0;
+}
+
+static struct cpufreq_driver cppc_cpufreq_driver = {
+ .get = cppc_get_freq,
+ .verify = cppc_cpufreq_verify,
+/* XXX: setpolicy gives us a high and low range for CPU perf,
+ * but doesn't give us the current desired CPU perf value?
+ */
+/* .setpolicy = cppc_cpufreq_setpolicy, */
+ .target = cppc_cpufreq_target,
+ .init = cppc_cpufreq_cpu_init,
+ .exit = cppc_cpufreq_cpu_exit,
+ .name = "cppc-cpufreq",
+};
+
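+/*
+ * Driver entry point: allocate the per-CPU _CPC descriptors, discover and map
+ * the PCC communication channel for the CPPC subspace, then register with the
+ * CPUFreq core.
+ */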
+static int __init cppc_cpufreq_init(void)
+{
+ int ret;
+ int len;
+
+ if (acpi_disabled)
+ return 0;
+
+ /* Per CPU descriptors for _CPC. */
+ cpc_desc = alloc_percpu(struct cpc_desc);
+
+ if (!cpc_desc) {
+ ret = -ENOMEM;
+ pr_debug("No mem for CPC descriptors\n");
+ goto out_err;
+ }
+
+ /* PCC Subspace Communication region for CPPC. */
+ ret = get_pcc_comm_channel(PCC_SUBSPACE_IDX, &pcc_comm_base_addr, &len);
+
+ if (ret) {
+ pr_err("No PCC Communication channel found\n");
+ ret = -ENODEV;
+ goto out_err;
+ }
+
+ /*
+ * The base address returned in the PCC subspace descriptor needs to be
+ * ioremap'd before the client can send/receive data to/from the platform.
+ */
+ comm_base_addr = ioremap_nocache(pcc_comm_base_addr, len);
+
+ if (!comm_base_addr) {
+ pr_err("Could not map PCC communication channel\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Plug into CPUFreq subsystem. */
+ ret = cpufreq_register_driver(&cppc_cpufreq_driver);
+ return ret;
+
+out_err:
+ free_percpu(cpc_desc);
+ return ret;
+}
+
+late_initcall(cppc_cpufreq_init);
+/*XXX: Add kmod support */
Add initial support for CPPC as defined in the ACPI 5.0a spec.

Signed-off-by: Ashwin Chaugule <ashwin.chaugule@linaro.org>
---
 arch/arm64/Kconfig             |   2 +
 drivers/cpufreq/Kconfig        |  11 +-
 drivers/cpufreq/Makefile       |   1 +
 drivers/cpufreq/cppc-cpufreq.c | 298 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 311 insertions(+), 1 deletion(-)
 create mode 100644 drivers/cpufreq/cppc-cpufreq.c