@@ -227,6 +227,19 @@ void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
+#define ARM_AFF0_SHIFT 0
+#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
+#define ARM_AFF1_SHIFT 8
+#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
+#define ARM_AFF2_SHIFT 16
+#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
+#define ARM_AFF3_SHIFT 32
+#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
+
+#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
+#define ARM64_AFFINITY_MASK \
+ (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
+
#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
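/*
 * Illustrative sketch, not part of the patch: one way the shift/mask pairs
 * introduced above could be used to pull a single affinity level out of an
 * MPIDR-style value.  The helper name arm_mpidr_aff_level() is hypothetical;
 * it assumes <stdint.h> and the ARM_AFFn_SHIFT/ARM_AFFn_MASK definitions
 * from the hunk above.
 */
static inline uint8_t arm_mpidr_aff_level(uint64_t mpidr, unsigned level)
{
    switch (level) {
    case 0:
        return (mpidr & ARM_AFF0_MASK) >> ARM_AFF0_SHIFT;
    case 1:
        return (mpidr & ARM_AFF1_MASK) >> ARM_AFF1_SHIFT;
    case 2:
        return (mpidr & ARM_AFF2_MASK) >> ARM_AFF2_SHIFT;
    case 3:
        return (mpidr & ARM_AFF3_MASK) >> ARM_AFF3_SHIFT;
    default:
        return 0;
    }
}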
@@ -456,7 +456,7 @@ static void arm_cpu_initfn(Object *obj)
*/
Aff1 = cs->cpu_index / ARM_CPUS_PER_CLUSTER;
Aff0 = cs->cpu_index % ARM_CPUS_PER_CLUSTER;
- cpu->mp_affinity = (Aff1 << 8) | Aff0;
+ cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0;
#ifndef CONFIG_USER_ONLY
/* Our inbound IRQ and FIQ lines */
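/*
 * Worked example for the hunk above, assuming ARM_CPUS_PER_CLUSTER is 8
 * (the value used elsewhere in this file): cpu_index 10 gives
 *   Aff1 = 10 / 8 = 1,  Aff0 = 10 % 8 = 2,
 *   mp_affinity = (1 << ARM_AFF1_SHIFT) | 2 = 0x102.
 */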
@@ -181,7 +181,6 @@ int kvm_arm_cpreg_level(uint64_t regidx)
return KVM_PUT_RUNTIME_STATE;
}
-#define ARM_MPIDR_HWID_BITMASK 0xFFFFFF
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
@@ -234,7 +233,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (ret) {
return ret;
}
- cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;
+ cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
return kvm_arm_init_cpreg_list(cpu);
}
@@ -77,7 +77,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
return true;
}
-#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
@@ -120,7 +119,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (ret) {
return ret;
}
- cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;
+ cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
return kvm_arm_init_cpreg_list(cpu);
}
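/*
 * Sanity-check sketch, not part of the patch: the symbolic masks are meant
 * to be bit-for-bit identical to the literals they replace, which a
 * compile-time assert along these lines would confirm (QEMU_BUILD_BUG_ON
 * comes from "qemu/compiler.h").
 */
QEMU_BUILD_BUG_ON(ARM32_AFFINITY_MASK != 0xFFFFFFULL);
QEMU_BUILD_BUG_ON(ARM64_AFFINITY_MASK != 0xFF00FFFFFFULL);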