--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -7,13 +7,15 @@ struct cpu_topology {
 	int thread_id;
 	int core_id;
 	int cluster_id;
+	int package_id;
 	cpumask_t thread_sibling;
 	cpumask_t core_sibling;
 };
 
 extern struct cpu_topology cpu_topology[NR_CPUS];
 
-#define topology_physical_package_id(cpu)	(cpu_topology[cpu].cluster_id)
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
+#define topology_cod_id(cpu)		(cpu_topology[cpu].cluster_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
 #define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -67,6 +67,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 			leaf = false;
 			cpu = get_cpu_for_node(t);
 			if (cpu >= 0) {
+				/* maintain DT cluster == package behavior */
+				cpu_topology[cpu].package_id = cluster_id;
 				cpu_topology[cpu].cluster_id = cluster_id;
 				cpu_topology[cpu].core_id = core_id;
 				cpu_topology[cpu].thread_id = i;
@@ -88,7 +90,7 @@ static int __init parse_core(struct device_node *core, int cluster_id,
 			       core);
 			return -EINVAL;
 		}
-
+		cpu_topology[cpu].package_id = cluster_id;
 		cpu_topology[cpu].cluster_id = cluster_id;
 		cpu_topology[cpu].core_id = core_id;
 	} else if (leaf) {
@@ -228,7 +230,7 @@ static void update_siblings_masks(unsigned int cpuid)
 	for_each_possible_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+		if (cpuid_topo->package_id != cpu_topo->package_id)
 			continue;
 
 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
@@ -273,6 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
 					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
 					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
 	}
+	cpuid_topo->package_id = cpuid_topo->cluster_id;
 
 	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
 		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
@@ -292,6 +295,7 @@ static void __init reset_cpu_topology(void)
 		cpu_topo->thread_id = -1;
 		cpu_topo->core_id = 0;
 		cpu_topo->cluster_id = -1;
+		cpu_topo->package_id = -1;
 
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -184,6 +184,9 @@ static inline int cpu_to_mem(int cpu)
 #ifndef topology_physical_package_id
 #define topology_physical_package_id(cpu)	((void)(cpu), -1)
 #endif
+#ifndef topology_cod_id /* cluster on die */
+#define topology_cod_id(cpu)	topology_physical_package_id(cpu)
+#endif
 #ifndef topology_core_id
 #define topology_core_id(cpu)	((void)(cpu), 0)
 #endif
Many modern machines have cluster on die (COD) non-uniformity as well as
the traditional multi-socket architectures. Reusing the multi-socket or
NUMA on die concepts for these (as arm64 does) breaks down when presented
with actual multi-socket/COD machines. Similar problems are also visible
on some x86 machines, so it seems appropriate to start abstracting and
making these topologies visible.

To start, a topology_cod_id() macro is added which defaults to returning
the same information as topology_physical_package_id(). Moving forward we
can start to split out the differences.

For arm64, an additional package_id is added to the cpu_topology array.
Initially this will be equal to the cluster_id as well.

Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
---
 arch/arm64/include/asm/topology.h | 4 +++-
 arch/arm64/kernel/topology.c      | 8 ++++++--
 include/linux/topology.h          | 3 +++
 3 files changed, 12 insertions(+), 3 deletions(-)

--
2.13.5
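As an aside, a minimal sketch of how a generic consumer could use the new
macro once the series lands; the function name report_cod_topology() is
made up for illustration and is not part of the patch above:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/topology.h>

/*
 * Illustration only: compare the cluster-on-die id against the physical
 * package id for each online CPU. With this series, topology_cod_id()
 * falls back to topology_physical_package_id() on architectures that do
 * not provide their own definition, so the two only diverge once an
 * arch (such as arm64 above) starts reporting distinct values.
 */
static void report_cod_topology(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (topology_cod_id(cpu) != topology_physical_package_id(cpu))
			pr_info("CPU%u: COD %d within package %d\n",
				cpu, topology_cod_id(cpu),
				topology_physical_package_id(cpu));
	}
}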