@@ -2787,7 +2787,8 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
- * + Secure PL1 & 0
+ * + Secure PL0
+ * + Secure PL1
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
@@ -2805,39 +2806,37 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* The only use of stage 2 translations is either as part of an s1+2
* lookup or when loading the descriptors during a stage 1 page table walk,
* and in both those cases we don't use the TLB.
- * 4. we want to be able to use the TLB for accesses done as part of a
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * 5. we want to be able to use the TLB for accesses done as part of a
* stage1 page table walk, rather than having to walk the stage2 page
* table over and over.
- * 5. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
+ * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
- * 6. we fold together most secure and non-secure regimes for A-profile,
+ * 7. we fold together most secure and non-secure regimes for A-profile,
* because there are no banked system registers for aarch64, so the
* process of switching between secure and non-secure is
* already heavyweight.
- * 7. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
* because both are in use simultaneously for Secure EL2.
*
* This gives us the following list of cases:
*
- * EL0 EL1&0 stage 1+2 (or AArch32 PL0 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 (or AArch32 PL1 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 +PAN (or AArch32 PL1 PL1&0 stage 1+2 +PAN)
+ * EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * EL1 EL1&0 stage 1+2 +PAN
* EL0 EL2&0
* EL2 EL2&0
* EL2 EL2&0 +PAN
* EL2 (aka NS PL2)
- * EL3 (not used when EL3 is AArch32)
+ * EL3 (aka S PL1)
* Stage2 Secure
* Stage2 NonSecure
* plus one TLB per Physical address space: S, NS, Realm, Root
*
* for a total of 14 different mmu_idx.
*
- * Note that when EL3 is AArch32, the usage is potentially confusing
- * because the MMU indexes are named for their AArch64 use, so code
- * using the ARMMMUIdx_E10_1 might be at EL3, not EL1. This is because
- * Secure PL1 is always at EL3.
- *
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
* EL2 for cores like the Cortex-R52).
@@ -3130,10 +3129,6 @@ FIELD(TBFLAG_A32, NS, 10, 1)
* This requires an SME trap from AArch32 mode when using NEON.
*/
FIELD(TBFLAG_A32, SME_TRAP_NONSTREAMING, 11, 1)
-/*
- * Indicates whether we are in the Secure PL1&0 translation regime
- */
-FIELD(TBFLAG_A32, S_PL1_0, 12, 1)
 
/*
* Bit usage when in AArch32 state, for M-profile only.
@@ -275,20 +275,6 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1)
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */
 
-/**
- * arm_aa32_secure_pl1_0(): Return true if in Secure PL1&0 regime
- *
- * Return true if the CPU is in the Secure PL1&0 translation regime.
- * This requires that EL3 exists and is AArch32 and we are currently
- * Secure. If this is the case then the ARMMMUIdx_E10* apply and
- * mean we are in EL3, not EL1.
- */
-static inline bool arm_aa32_secure_pl1_0(CPUARMState *env)
-{
- return arm_feature(env, ARM_FEATURE_EL3) &&
- !arm_el_is_aa64(env, 3) && arm_is_secure(env);
-}
-
/**
* raise_exception: Raise the specified exception.
* Raise a guest exception with the specified value, syndrome register
@@ -841,12 +827,7 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
return mmu_idx | ARM_MMU_IDX_A;
}
 
-/**
- * Return the exception level we're running at if our current MMU index
- * is @mmu_idx. @s_pl1_0 should be true if this is the AArch32
- * Secure PL1&0 translation regime.
- */
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0);
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);
 
/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);
@@ -941,11 +922,11 @@ static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
return 3;
case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
+ return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
- return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_MPrivNegPri:
case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
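Because the reshuffled case labels are hard to follow in diff form, here is the shape this part of regime_el() takes once the patch is applied (a restatement for review, assuming the M-profile cases keep falling through to the existing "return 1"):

    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_Stage1_E0:
        return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return 1;

EL0 accesses can still originate from Secure PL0 when EL3 is AArch32, so the E10_0/Stage1_E0 cases keep the EL3-or-EL1 selection; the EL1 indexes can no longer stand in for Secure PL1 (that is ARMMMUIdx_E3 now), so they return 1 unconditionally.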
@@ -165,8 +165,6 @@ typedef struct DisasContext {
uint8_t gm_blocksize;
/* True if the current insn_start has been updated. */
bool insn_start_updated;
- /* True if this is the AArch32 Secure PL1&0 translation regime */
- bool s_pl1_0;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */
int c15_cpar;
/* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */
@@ -3701,7 +3701,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
*/
format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
 
-    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_aa32_secure_pl1_0(env)) {
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
if (mmu_idx == ARMMMUIdx_E10_0 ||
mmu_idx == ARMMMUIdx_E10_1 ||
mmu_idx == ARMMMUIdx_E10_1_PAN) {
@@ -3775,11 +3775,13 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
case 0:
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_E3;
+ break;
case 2:
g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
/* fall through */
case 1:
- case 3:
if (ri->crm == 9 && arm_pan_enabled(env)) {
mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
} else {
@@ -11860,11 +11862,8 @@ void arm_cpu_do_interrupt(CPUState *cs)
 
uint64_t arm_sctlr(CPUARMState *env, int el)
{
- if (arm_aa32_secure_pl1_0(env)) {
- /* In Secure PL1&0 SCTLR_S is always controlling */
- el = 3;
- } else if (el == 0) {
- /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
+ /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
+ if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
}
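Put back together, arm_sctlr() reduces to the following shape. This is a consolidated view for review, not new code; the trailing banked-register lookup through env->cp15.sctlr_el[] is the existing tail of the function, unchanged by this patch (reproduced here from memory, so treat it as illustrative):

    uint64_t arm_sctlr(CPUARMState *env, int el)
    {
        /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
        if (el == 0) {
            ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
            el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
        }
        return env->cp15.sctlr_el[el];
    }

With AArch32 EL3 callers now passing el == 3 directly, the EL3-banked SCTLR is selected without the special case.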
@@ -12524,12 +12523,8 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 0;
}
 
-/*
- * Return the exception level we're running at if this is our mmu_idx.
- * s_pl1_0 should be true if this is the AArch32 Secure PL1&0 translation
- * regime.
- */
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0)
+/* Return the exception level we're running at if this is our mmu_idx */
+int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
if (mmu_idx & ARM_MMU_IDX_M) {
return mmu_idx & ARM_MMU_IDX_M_PRIV;
@@ -12541,7 +12536,7 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx, bool s_pl1_0)
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
- return s_pl1_0 ? 3 : 1;
+ return 1;
case ARMMMUIdx_E2:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
@@ -12579,15 +12574,6 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
idx = ARMMMUIdx_E10_0;
}
break;
- case 3:
- /*
- * AArch64 EL3 has its own translation regime; AArch32 EL3
- * uses the Secure PL1&0 translation regime.
- */
- if (arm_el_is_aa64(env, 3)) {
- return ARMMMUIdx_E3;
- }
- /* fall through */
case 1:
if (arm_pan_enabled(env)) {
idx = ARMMMUIdx_E10_1_PAN;
@@ -12607,6 +12593,8 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
idx = ARMMMUIdx_E2;
}
break;
+ case 3:
+ return ARMMMUIdx_E3;
default:
g_assert_not_reached();
}
@@ -3607,11 +3607,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E2:
- if (arm_aa32_secure_pl1_0(env)) {
- ss = ARMSS_Secure;
- } else {
- ss = arm_security_space_below_el3(env);
- }
+ ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
/*
@@ -198,10 +198,6 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
DP_TBFLAG_A32(flags, SME_TRAP_NONSTREAMING, 1);
}
 
-    if (arm_aa32_secure_pl1_0(env)) {
- DP_TBFLAG_A32(flags, S_PL1_0, 1);
- }
-
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
 
@@ -11690,7 +11690,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->tbii = EX_TBFLAG_A64(tb_flags, TBII);
dc->tbid = EX_TBFLAG_A64(tb_flags, TBID);
dc->tcma = EX_TBFLAG_A64(tb_flags, TCMA);
- dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, false);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
dc->user = (dc->current_el == 0);
#endif
@@ -7546,6 +7546,10 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
 
core_mmu_idx = EX_TBFLAG_ANY(tb_flags, MMUIDX);
dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (dc->current_el == 0);
+#endif
dc->fp_excp_el = EX_TBFLAG_ANY(tb_flags, FPEXC_EL);
dc->align_mem = EX_TBFLAG_ANY(tb_flags, ALIGN_MEM);
dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL);
@@ -7576,12 +7580,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
}
dc->sme_trap_nonstreaming =
EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING);
- dc->s_pl1_0 = EX_TBFLAG_A32(tb_flags, S_PL1_0);
}
- dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx, dc->s_pl1_0);
-#if !defined(CONFIG_USER_ONLY)
- dc->user = (dc->current_el == 0);
-#endif
dc->lse2 = false; /* applies only to aarch64 */
dc->cp_regs = cpu->cp_regs;
dc->features = env->features;