@@ -1314,6 +1314,7 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
+#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */
#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
#define SCTLR_ITFSB (1ULL << 37) /* v8.5-MemTag */
@@ -4224,6 +4225,11 @@ static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id)
return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0;
}
+static inline bool isar_feature_aa64_mops(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS);
+}
+
/*
* Feature tests for "does this exist in either 32-bit or 64-bit?"
*/
@@ -5980,7 +5980,10 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
uint64_t valid_mask = 0;
- /* No features adding bits to HCRX are implemented. */
+ /* FEAT_MOPS adds MSCEn and MCE2 */
+ if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
+ valid_mask |= HCRX_MSCEN | HCRX_MCE2;
+ }
/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
@@ -6009,13 +6012,24 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
/*
* The bits in this register behave as 0 for all purposes other than
- * direct reads of the register if:
- * - EL2 is not enabled in the current security state,
- * - SCR_EL3.HXEn is 0.
+ * direct reads of the register if SCR_EL3.HXEn is 0.
+ * If EL2 is not enabled in the current security state, then the
+ * bit may behave as if 0, or as if 1, depending on the bit.
+ * For the moment, we treat the EL2-disabled case as taking
+ * priority over the HXEn-disabled case. This is true for the only
+ * bit for a feature which we implement where the answer is different
+ * for the two cases (MSCEn for FEAT_MOPS).
+ * This may need to be revisited for future bits.
*/
- if (!arm_is_el2_enabled(env)
- || (arm_feature(env, ARM_FEATURE_EL3)
- && !(env->cp15.scr_el3 & SCR_HXEN))) {
+ if (!arm_is_el2_enabled(env)) {
+ uint64_t hcrx = 0;
+ if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
+ /* MSCEn behaves as 1 if EL2 is not enabled */
+ hcrx |= HCRX_MSCEN;
+ }
+ return hcrx;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
return 0;
}
return env->cp15.hcrx_el2;
FEAT_MOPS defines a handful of new enable bits: * HCRX_EL2.MSCEn, SCTLR_EL1.MSCEn, SCTLR_EL2.MSCEn: define whether the new insns should UNDEF or not * HCRX_EL2.MCE2: defines whether memops exceptions from EL1 should be taken to EL1 or EL2 Since we don't sanitise what bits can be written for the SCTLR registers, we only need to handle the new bits in HCRX_EL2, and define SCTLR_MSCEN for the new SCTLR bit value. The precedence of "HCRX bits act as 0 if SCR_EL3.HXEn is 0" versus "bit acts as 1 if EL2 disabled" is not clear from the register definition text, but it is clear in the CheckMOPSEnabled() pseudocode, so we follow that. We'll have to check whether other bits we need to implement in future follow the same logic or not. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> --- target/arm/cpu.h | 6 ++++++ target/arm/helper.c | 28 +++++++++++++++++++++------- 2 files changed, 27 insertions(+), 7 deletions(-)