@@ -351,20 +351,21 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
int cpu;
int addr_cells = 1;
const MachineState *ms = MACHINE(vms);
+ const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
int smp_cpus = ms->smp.cpus;
/*
- * From Documentation/devicetree/bindings/arm/cpus.txt
- * On ARM v8 64-bit systems value should be set to 2,
- * that corresponds to the MPIDR_EL1 register size.
- * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs
- * in the system, #address-cells can be set to 1, since
- * MPIDR_EL1[63:32] bits are not used for CPUs
- * identification.
+     * See Linux Documentation/devicetree/bindings/arm/cpus.yaml
+     * On ARM v8 64-bit systems the value should be set to 2,
+     * which corresponds to the MPIDR_EL1 register size.
+     * If the MPIDR_EL1[63:32] value is equal to 0 on all CPUs
+     * in the system, #address-cells can be set to 1, since
+     * MPIDR_EL1[63:32] bits are not used for CPU
+     * identification.
*
- * Here we actually don't know whether our system is 32- or 64-bit one.
- * The simplest way to go is to examine affinity IDs of all our CPUs. If
- * at least one of them has Aff3 populated, we set #address-cells to 2.
+     * Here we actually don't know whether our system is a 32- or 64-bit one.
+     * The simplest way to go is to examine the affinity IDs of all our CPUs.
+     * If at least one of them has Aff3 populated, we set #address-cells to 2.
*/
for (cpu = 0; cpu < smp_cpus; cpu++) {
ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
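For context, the body of this loop is unchanged by the patch and so is elided
from the hunk; in QEMU's hw/arm/virt.c it performs the Aff3 test described in
the comment above, roughly:

    if (armcpu->mp_affinity & ARM_AFF3_MASK) {
        addr_cells = 2;
        break;
    }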
@@ -407,8 +408,57 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
}
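+        /*
+         * Allocate a phandle for each cpu node; the cpu-map entries
+         * generated further down refer back to it through their "cpu"
+         * property. no_cpu_topology is a machine-class compat flag that
+         * keeps the DT unchanged for older virt machine types.
+         */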
+ if (!vmc->no_cpu_topology) {
+ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(ms->fdt));
+ }
+
g_free(nodename);
}
+
+ if (!vmc->no_cpu_topology) {
+        /*
+         * Add the vCPU topology description through the fdt node cpu-map.
+         *
+         * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt
+         * In an SMP system, the hierarchy of CPUs can be defined through
+         * four entities that are used to describe the layout of CPUs in
+         * the system: socket/cluster/core/thread.
+         *
+         * A socket node represents the boundary of a system physical
+         * package and its child nodes must be one or more cluster nodes.
+         * A system can contain several layers of clustering within a
+         * single physical package, and cluster nodes can be contained in
+         * parent cluster nodes.
+         *
+         * Given that clusters are not yet supported in the vCPU topology,
+         * we currently generate one cluster node within each socket node
+         * by default.
+         */
+ qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
+
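+        /*
+         * Walk the vCPUs from last to first: subnodes added to the FDT
+         * later are inserted in front of their siblings, so reverse
+         * iteration leaves the cpu-map entries in increasing vCPU order
+         * in the final device tree.
+         */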
+ for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
+ char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu);
+ char *map_path;
+
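+            /*
+             * Worked example (illustrative numbers): with sockets=2,
+             * cores=2 and threads=2, vCPU 5 gives
+             *   socket: 5 / (2 * 2) = 1
+             *   core:   (5 / 2) % 2 = 0
+             *   thread: 5 % 2       = 1
+             * i.e. the path /cpus/cpu-map/socket1/cluster0/core0/thread1.
+             */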
+ if (ms->smp.threads > 1) {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
+ cpu / (ms->smp.cores * ms->smp.threads),
+ (cpu / ms->smp.threads) % ms->smp.cores,
+ cpu % ms->smp.threads);
+ } else {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/cluster0/core%d",
+ cpu / ms->smp.cores,
+ cpu % ms->smp.cores);
+ }
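+            /*
+             * qemu_fdt_add_path() also creates any missing intermediate
+             * nodes (socketN, cluster0, coreN), so vCPUs sharing a core
+             * or socket simply reuse the nodes created for the first
+             * vCPU that reached them.
+             */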
+ qemu_fdt_add_path(ms->fdt, map_path);
+ qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path);
+
+ g_free(map_path);
+ g_free(cpu_path);
+ }
+ }
}

static void fdt_add_its_gic_node(VirtMachineState *vms)
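For reference, with -smp 4,sockets=1,cores=2,threads=2 the code above would
produce a device tree fragment along these lines (a sketch; the phandle values
are illustrative and actually come from qemu_fdt_alloc_phandle() at run time):

    cpus {
        ...
        cpu@0 { ... phandle = <0x8001>; };
        cpu@1 { ... phandle = <0x8002>; };
        cpu@2 { ... phandle = <0x8003>; };
        cpu@3 { ... phandle = <0x8004>; };

        cpu-map {
            socket0 {
                cluster0 {
                    core0 {
                        thread0 { cpu = <0x8001>; };
                        thread1 { cpu = <0x8002>; };
                    };
                    core1 {
                        thread0 { cpu = <0x8003>; };
                        thread1 { cpu = <0x8004>; };
                    };
                };
            };
        };
    };

The guest-visible result can be inspected by dumping the DTB and decompiling
it with dtc -I dtb -O dts.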