We can now store these values into ARMCPUClass instead of into a
temporary ARMHostCPUFeatures structure.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/kvm_arm.h |  14 ++---
 target/arm/cpu64.c   |  18 +++++--
 target/arm/kvm.c     | 119 +++++++++++++++++--------------------------
 3 files changed, 69 insertions(+), 82 deletions(-)

--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -140,13 +140,15 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
uint32_t kvm_arm_sve_get_vls(CPUState *cs);
/**
- * kvm_arm_set_cpu_features_from_host:
- * @cpu: ARMCPU to set the features for
+ * kvm_arm_get_host_cpu_features:
+ * @acc: ARMCPUClass to fill in
*
- * Set up the ARMCPU struct fields up to match the information probed
- * from the host CPU.
+ * Probe the capabilities of the host kernel's preferred CPU and fill
+ * in the ARMCPUClass struct accordingly.
+ *
+ * Returns true on success and false otherwise.
*/
-void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);
+bool kvm_arm_get_host_cpu_features(ARMCPUClass *acc, Error **errp);
/**
* kvm_arm_add_vcpu_properties:
@@ -245,7 +247,7 @@ static inline bool kvm_arm_steal_time_supported(void)
/*
* These functions should never actually be called without KVM support.
*/
-static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+static inline bool kvm_arm_get_host_cpu_features(ARMCPUClass *c, Error **e)
{
g_assert_not_reached();
}
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -1105,11 +1105,21 @@ static void aarch64_host_class_init(ARMCPUClass *acc)
set_class_feature(acc, ARM_FEATURE_PMU);
}
+static bool aarch64_host_class_late_init(ARMCPUClass *acc, Error **errp)
+{
+ if (kvm_enabled()) {
+ return kvm_arm_get_host_cpu_features(acc, errp);
+ }
+ if (hvf_enabled()) {
+ return true;
+ }
+ error_setg(errp, "The 'host' CPU type can only be used with KVM or HVF");
+ return false;
+}
+
static void aarch64_host_object_init(Object *obj)
{
#if defined(CONFIG_KVM)
- ARMCPU *cpu = ARM_CPU(obj);
- kvm_arm_set_cpu_features_from_host(cpu);
aarch64_add_sve_properties(obj);
aarch64_add_pauth_properties(obj);
#elif defined(CONFIG_HVF)
@@ -1134,7 +1144,8 @@ static bool aarch64_max_class_late_init(ARMCPUClass *cpu, Error **errp)
uint32_t u;
if (kvm_enabled() || hvf_enabled()) {
- return true;
+ /* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
+ return aarch64_host_class_late_init(cpu, errp);
}
/*
@@ -1341,6 +1352,7 @@ static const ARMCPUInfo aarch64_cpus[] = {
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
{ .name = "host",
.class_init = aarch64_host_class_init,
+ .class_late_init = aarch64_host_class_late_init,
.object_init = aarch64_host_object_init },
#endif
};
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -44,17 +44,6 @@ static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;
-/**
- * ARMHostCPUFeatures: information about the host CPU (identified
- * by asking the host kernel)
- */
-typedef struct ARMHostCPUFeatures {
- ARMISARegisters isar;
- uint32_t target;
-} ARMHostCPUFeatures;
-
-static ARMHostCPUFeatures arm_host_cpu_features;
-
/**
* kvm_arm_vcpu_init:
* @cs: CPUState
@@ -1548,14 +1537,14 @@ static bool kvm_arm_pauth_supported(void)
/**
* kvm_arm_get_host_cpu_features:
- * @ahcf: ARMHostCPUClass to fill in
+ * @acc: ARMCPUClass to fill in
*
* Probe the capabilities of the host kernel's preferred CPU and fill
- * in the ARMHostCPUClass struct accordingly.
+ * in the ARMCPUClass struct accordingly.
*
* Returns true on success and false otherwise.
*/
-static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
+bool kvm_arm_get_host_cpu_features(ARMCPUClass *acc, Error **errp)
{
/* Identify the feature bits corresponding to the host CPU, and
* fill out the ARMHostCPUClass fields accordingly. To do this
@@ -1608,12 +1597,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
}
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ error_setg(errp, "Failed to create host vcpu");
+ acc->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
return false;
}
- ahcf->target = init.target;
+ acc->kvm_target = init.target;
- err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
+ err = read_sys_reg64(fdarray[2], &acc->isar.id_aa64pfr0,
ARM64_SYS_REG(3, 0, 0, 4, 0));
if (unlikely(err < 0)) {
/*
@@ -1632,26 +1623,26 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* ??? Either of these sounds like too much effort just
* to work around running a modern host kernel.
*/
- ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+ acc->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
err = 0;
} else {
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64pfr1,
ARM64_SYS_REG(3, 0, 0, 4, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64smfr0,
ARM64_SYS_REG(3, 0, 0, 4, 5));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64dfr0,
ARM64_SYS_REG(3, 0, 0, 5, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64dfr1,
ARM64_SYS_REG(3, 0, 0, 5, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64isar0,
ARM64_SYS_REG(3, 0, 0, 6, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64isar1,
ARM64_SYS_REG(3, 0, 0, 6, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64mmfr0,
ARM64_SYS_REG(3, 0, 0, 7, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64mmfr1,
ARM64_SYS_REG(3, 0, 0, 7, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64mmfr2,
ARM64_SYS_REG(3, 0, 0, 7, 2));
/*
@@ -1661,48 +1652,48 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* than skipping the reads and leaving 0, as we must avoid
* considering the values in every case.
*/
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_pfr0,
ARM64_SYS_REG(3, 0, 0, 1, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_pfr1,
ARM64_SYS_REG(3, 0, 0, 1, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_dfr0,
ARM64_SYS_REG(3, 0, 0, 1, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr0,
ARM64_SYS_REG(3, 0, 0, 1, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr1,
ARM64_SYS_REG(3, 0, 0, 1, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr2,
ARM64_SYS_REG(3, 0, 0, 1, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr3,
ARM64_SYS_REG(3, 0, 0, 1, 7));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar0,
ARM64_SYS_REG(3, 0, 0, 2, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar1,
ARM64_SYS_REG(3, 0, 0, 2, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar2,
ARM64_SYS_REG(3, 0, 0, 2, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar3,
ARM64_SYS_REG(3, 0, 0, 2, 3));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar4,
ARM64_SYS_REG(3, 0, 0, 2, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar5,
ARM64_SYS_REG(3, 0, 0, 2, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr4,
ARM64_SYS_REG(3, 0, 0, 2, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_isar6,
ARM64_SYS_REG(3, 0, 0, 2, 7));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.mvfr0,
ARM64_SYS_REG(3, 0, 0, 3, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.mvfr1,
ARM64_SYS_REG(3, 0, 0, 3, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.mvfr2,
ARM64_SYS_REG(3, 0, 0, 3, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_pfr2,
ARM64_SYS_REG(3, 0, 0, 3, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_dfr1,
ARM64_SYS_REG(3, 0, 0, 3, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
+ err |= read_sys_reg32(fdarray[2], &acc->isar.id_mmfr5,
ARM64_SYS_REG(3, 0, 0, 3, 6));
/*
@@ -1715,14 +1706,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
* We only do this if the CPU supports AArch32 at EL1.
*/
- if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
- int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
- int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+ if (FIELD_EX32(acc->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
+ int wrps = FIELD_EX64(acc->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
+ int brps = FIELD_EX64(acc->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
int ctx_cmps =
- FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+ FIELD_EX64(acc->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
int version = 6; /* ARMv8 debug architecture */
bool has_el3 =
- !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+ !!FIELD_EX32(acc->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
uint32_t dbgdidr = 0;
dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
@@ -1732,12 +1723,12 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
dbgdidr |= (1 << 15); /* RES1 bit */
- ahcf->isar.dbgdidr = dbgdidr;
+ acc->isar.dbgdidr = dbgdidr;
}
if (pmu_supported) {
/* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.reset_pmcr_el0,
ARM64_SYS_REG(3, 3, 9, 12, 0));
}
@@ -1749,7 +1740,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* enabled SVE support, which resulted in an error rather than RAZ.
* So only read the register if we set KVM_ARM_VCPU_SVE above.
*/
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+ err |= read_sys_reg64(fdarray[2], &acc->isar.id_aa64zfr0,
ARM64_SYS_REG(3, 0, 0, 4, 4));
}
}
@@ -1759,24 +1750,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
return !err;
}
-void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
-{
- if (!arm_host_cpu_features.isar.id_aa64pfr0) {
- if (!kvm_enabled() ||
- !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
- /* We can't report this error yet, so flag that we need to
- * in arm_cpu_realizefn().
- */
- cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
- cpu->host_cpu_probe_failed = true;
- return;
- }
- }
-
- cpu->kvm_target = arm_host_cpu_features.target;
- cpu->isar = arm_host_cpu_features.isar;
-}
-
/**
* kvm_arm_steal_time_supported:
*
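As a rough illustration of the commit message's point (not part of this patch, and using a hypothetical helper name), per-instance code can now copy the probed values straight from the class instead of from a file-scope ARMHostCPUFeatures cache:

```c
/* Illustrative sketch only; arm_cpu_copy_host_features() is hypothetical. */
#include "qemu/osdep.h"
#include "cpu.h"

static void arm_cpu_copy_host_features(ARMCPU *cpu)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);

    /*
     * These class fields were filled in once, at class late-init time,
     * by kvm_arm_get_host_cpu_features().
     */
    cpu->kvm_target = acc->kvm_target;
    cpu->isar = acc->isar;
}
```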