--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1332,6 +1332,8 @@ FIELD(CPTR_EL3, TTA, 20, 1)
 FIELD(CPTR_EL3, TAM, 30, 1)
 FIELD(CPTR_EL3, TCPAC, 31, 1)
 
+#define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
+#define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
 #define MDCR_EPMAD    (1U << 21)
 #define MDCR_EDAD     (1U << 20)
 #define MDCR_SPME     (1U << 17)  /* MDCR_EL3 */
@@ -3724,6 +3726,13 @@ static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id)
         FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
 }
 
+static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id)
+{
+    /* 0xf means "non-standard IMPDEF PMU" */
+    return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 &&
+        FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+}
+
 static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
 {
     return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
@@ -4048,6 +4057,12 @@ static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id)
         FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
 }
 
+static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 &&
+        FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+}
+
 static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
@@ -4221,6 +4236,11 @@ static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id)
     return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id);
 }
 
+static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id);
+}
+
 static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
 {
     return isar_feature_aa64_ccidx(id) || isar_feature_aa32_ccidx(id);
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1084,8 +1084,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
  * We use these to decide whether we need to wrap a write to MDCR_EL2
  * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
  */
-#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN)
-#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME)
+#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD)
+#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
 
 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
  * the current EL, security state, and register configuration.
@@ -1120,8 +1120,21 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
         prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
     }
 
-    if (prohibited && counter == 31) {
-        prohibited = env->cp15.c9_pmcr & PMCRDP;
+    if (counter == 31) {
+        /*
+         * The cycle counter defaults to running. PMCR.DP says "disable
+         * the cycle counter when event counting is prohibited".
+         * Some MDCR bits disable the cycle counter specifically.
+         */
+        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
+        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            if (secure) {
+                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
+            }
+            if (el == 2) {
+                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
+            }
+        }
     }
 
     if (counter == 31) {
FEAT_PMUv3p5 introduces new bits MDCR_EL2.HCCD and MDCR_EL3.SCCD,
which disable the cycle counter from counting at EL2 and in Secure
state respectively. Add the code to support these bits.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/cpu.h    | 20 ++++++++++++++++++++
 target/arm/helper.c | 21 +++++++++++++++++----
 2 files changed, 37 insertions(+), 4 deletions(-)
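
For reference, here is a minimal standalone C sketch of the decision the
helper.c hunk above implements. This is not QEMU code: the struct, its
field names, and cycle_counter_prohibited() are illustrative stand-ins
for CPUARMState and pmu_counter_enabled(); only the MDCR/PMCR bit
positions are architectural.

/* Standalone model of the PMUv3p5 cycle-counter prohibit logic. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MDCR_SCCD (1U << 23)  /* MDCR_EL3: disable cycle counter when Secure */
#define MDCR_HCCD (1U << 23)  /* MDCR_EL2: disable cycle counter at EL2 */
#define PMCR_DP   (1U << 5)   /* PMCR: cycle counter follows event prohibits */

struct cpu_state {                /* hypothetical stand-in for CPUARMState */
    int el;                       /* current exception level */
    bool secure;                  /* current security state */
    bool have_pmuv3p5;            /* FEAT_PMUv3p5 implemented */
    uint32_t mdcr_el2, mdcr_el3, pmcr;
    bool events_prohibited;       /* result of the event-counter checks */
};

static bool cycle_counter_prohibited(const struct cpu_state *s)
{
    /* PMCR.DP extends the event-counter prohibit to the cycle counter */
    bool prohibited = s->events_prohibited && (s->pmcr & PMCR_DP);

    /* FEAT_PMUv3p5 adds prohibits that hit only the cycle counter */
    if (s->have_pmuv3p5) {
        if (s->secure) {
            prohibited = prohibited || (s->mdcr_el3 & MDCR_SCCD);
        }
        if (s->el == 2) {
            prohibited = prohibited || (s->mdcr_el2 & MDCR_HCCD);
        }
    }
    return prohibited;
}

int main(void)
{
    /* HCCD stops only the cycle counter at EL2, even when event
     * counting is otherwise allowed. Expected output: "prohibited: 1" */
    struct cpu_state s = {
        .el = 2, .secure = false, .have_pmuv3p5 = true,
        .mdcr_el2 = MDCR_HCCD, .events_prohibited = false,
    };
    printf("prohibited: %d\n", cycle_counter_prohibited(&s));
    return 0;
}

Note the two independent if statements rather than an else-if chain:
with FEAT_SEL2 a CPU can be at Secure EL2, where both SCCD and HCCD
apply, so neither check may shadow the other.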