--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -32,6 +32,6 @@
# define TARGET_PAGE_BITS_MIN 10
#endif
-#define NB_MMU_MODES 15
+#define NB_MMU_MODES 8
#endif
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2873,26 +2873,26 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* table over and over.
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
+ * 7. we fold together the secure and non-secure regimes for A-profile,
+ *    because there are no banked system registers for aarch64, so the
+ *    process of switching between secure and non-secure is already
+ *    heavyweight.
*
* This gives us the following list of cases:
*
- * NS EL0 EL1&0 stage 1+2 (aka NS PL0)
- * NS EL1 EL1&0 stage 1+2 (aka NS PL1)
- * NS EL1 EL1&0 stage 1+2 +PAN
- * NS EL0 EL2&0
- * NS EL2 EL2&0
- * NS EL2 EL2&0 +PAN
- * NS EL2 (aka NS PL2)
- * S EL0 EL1&0 (aka S PL0)
- * S EL1 EL1&0 (not used if EL3 is 32 bit)
- * S EL1 EL1&0 +PAN
- * S EL3 (aka S PL1)
+ * EL0 EL1&0 stage 1+2 (aka NS PL0)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1)
+ * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL2&0
+ * EL2 EL2&0
+ * EL2 EL2&0 +PAN
+ * EL2 (aka NS PL2)
+ * EL3 (aka S PL1)
*
- * for a total of 11 different mmu_idx.
+ * for a total of 8 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
- * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
- * NS EL2 if we ever model a Cortex-R52).
+ * as A profile. They only need to distinguish EL0 and EL1 (and
+ * EL2 if we ever model a Cortex-R52).
*
* M profile CPUs are rather different as they do not have a true MMU.
* They have the following different MMU indexes:
@@ -2931,9 +2931,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */
-/* Meanings of the bits for A profile mmu idx values */
-#define ARM_MMU_IDX_A_NS 0x8
-
/* Meanings of the bits for M profile mmu idx values */
#define ARM_MMU_IDX_M_PRIV 0x1
#define ARM_MMU_IDX_M_NEGPRI 0x2
@@ -2947,22 +2944,14 @@ typedef enum ARMMMUIdx {
/*
* A-profile.
*/
- ARMMMUIdx_SE10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_SE3 = 7 | ARM_MMU_IDX_A,
-
- ARMMMUIdx_E10_0 = ARMMMUIdx_SE10_0 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_0 = ARMMMUIdx_SE20_0 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E10_1 = ARMMMUIdx_SE10_1 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_2 = ARMMMUIdx_SE20_2 | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E10_1_PAN = ARMMMUIdx_SE10_1_PAN | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E20_2_PAN = ARMMMUIdx_SE20_2_PAN | ARM_MMU_IDX_A_NS,
- ARMMMUIdx_E2 = ARMMMUIdx_SE2 | ARM_MMU_IDX_A_NS,
+ ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
@@ -2971,9 +2960,6 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE0 = 3 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE1 = 4 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_SE1_PAN = 5 | ARM_MMU_IDX_NOTLB,
/*
* Not allocated a TLB: used only for second stage of an S12 page
* table walk, or for descriptor loads during first stage of an S1
@@ -2981,8 +2967,8 @@ typedef enum ARMMMUIdx {
* then various TLB flush insns which currently are no-ops or flush
* only stage 1 MMU indexes will need to change to flush stage 2.
*/
- ARMMMUIdx_Stage2 = 6 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage2_S = 7 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage2 = 3 | ARM_MMU_IDX_NOTLB,
+ ARMMMUIdx_Stage2_S = 4 | ARM_MMU_IDX_NOTLB,
/*
* M-profile.
@@ -3012,14 +2998,7 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E2),
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
- TO_CORE_BIT(SE10_0),
- TO_CORE_BIT(SE20_0),
- TO_CORE_BIT(SE10_1),
- TO_CORE_BIT(SE20_2),
- TO_CORE_BIT(SE10_1_PAN),
- TO_CORE_BIT(SE20_2_PAN),
- TO_CORE_BIT(SE2),
- TO_CORE_BIT(SE3),
+ TO_CORE_BIT(E3),
TO_CORE_BIT(MUser),
TO_CORE_BIT(MPriv),
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -649,21 +649,12 @@ static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_Stage1_SE0:
- case ARMMMUIdx_Stage1_SE1:
- case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return true;
default:
return false;
@@ -674,11 +665,8 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_Stage1_SE1_PAN:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE10_1_PAN:
- case ARMMMUIdx_SE20_2_PAN:
return true;
default:
return false;
@@ -689,30 +677,20 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
case ARMMMUIdx_Stage2:
case ARMMMUIdx_Stage2_S:
- case ARMMMUIdx_SE2:
case ARMMMUIdx_E2:
return 2;
- case ARMMMUIdx_SE3:
+ case ARMMMUIdx_E3:
return 3;
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_Stage1_SE0:
- return arm_el_is_aa64(env, 3) ? 1 : 3;
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
+ case ARMMMUIdx_E10_0:
case ARMMMUIdx_Stage1_E0:
+ return arm_el_is_aa64(env, 3) || !arm_is_secure_below_el3(env) ? 1 : 3;
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_Stage1_SE1:
- case ARMMMUIdx_Stage1_SE1_PAN:
- case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
case ARMMMUIdx_MPrivNegPri:
@@ -954,9 +932,6 @@ static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
case ARMMMUIdx_Stage1_E0:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
- case ARMMMUIdx_Stage1_SE0:
- case ARMMMUIdx_Stage1_SE1:
- case ARMMMUIdx_Stage1_SE1_PAN:
return true;
default:
return false;
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1671,6 +1671,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Begin with base v8.0 state. */
uint32_t valid_mask = 0x3fff;
ARMCPU *cpu = env_archcpu(env);
+ uint64_t changed;
/*
* Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always
@@ -1730,7 +1731,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* Clear all-context RES0 bits. */
value &= valid_mask;
- raw_write(env, ri, value);
+ changed = env->cp15.scr_el3 ^ value;
+ env->cp15.scr_el3 = value;
+
+ /*
+ * If SCR_EL3.NS changes, i.e. arm_is_secure_below_el3, then
+ * we must invalidate all TLBs below EL3.
+ */
+ if (changed & SCR_NS) {
+ tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2));
+ }
}
static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -2561,9 +2577,6 @@ static int gt_phys_redir_timeridx(CPUARMState *env)
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYP;
default:
return GTIMER_PHYS;
@@ -2576,9 +2589,6 @@ static int gt_virt_redir_timeridx(CPUARMState *env)
case ARMMMUIdx_E20_0:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return GTIMER_HYPVIRT;
default:
return GTIMER_VIRT;
@@ -3286,7 +3296,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_SE3;
+ mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
case 2:
@@ -3294,10 +3304,9 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* fall through */
case 1:
if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) {
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
- : ARMMMUIdx_Stage1_E1_PAN);
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
} else {
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ mmu_idx = ARMMMUIdx_Stage1_E1;
}
break;
default:
@@ -3308,7 +3317,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
/* stage 1 current state PL0: ATS1CUR, ATS1CUW */
switch (el) {
case 3:
- mmu_idx = ARMMMUIdx_SE10_0;
+ mmu_idx = ARMMMUIdx_E10_0;
secure = true;
break;
case 2:
@@ -3316,7 +3325,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 1:
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ mmu_idx = ARMMMUIdx_Stage1_E0;
break;
default:
g_assert_not_reached();
@@ -3385,17 +3394,16 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
switch (ri->opc1) {
case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) {
- mmu_idx = (secure ? ARMMMUIdx_Stage1_SE1_PAN
- : ARMMMUIdx_Stage1_E1_PAN);
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
} else {
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE1 : ARMMMUIdx_Stage1_E1;
+ mmu_idx = ARMMMUIdx_Stage1_E1;
}
break;
case 4: /* AT S1E2R, AT S1E2W */
- mmu_idx = secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2;
+ mmu_idx = ARMMMUIdx_E2;
break;
case 6: /* AT S1E3R, AT S1E3W */
- mmu_idx = ARMMMUIdx_SE3;
+ mmu_idx = ARMMMUIdx_E3;
secure = true;
break;
default:
@@ -3403,13 +3411,13 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
}
break;
case 2: /* AT S1E0R, AT S1E0W */
- mmu_idx = secure ? ARMMMUIdx_Stage1_SE0 : ARMMMUIdx_Stage1_E0;
+ mmu_idx = ARMMMUIdx_Stage1_E0;
break;
case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = secure ? ARMMMUIdx_SE10_1 : ARMMMUIdx_E10_1;
+ mmu_idx = ARMMMUIdx_E10_1;
break;
case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = secure ? ARMMMUIdx_SE10_0 : ARMMMUIdx_E10_0;
+ mmu_idx = ARMMMUIdx_E10_0;
break;
default:
g_assert_not_reached();
@@ -3679,11 +3687,6 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
ARMMMUIdxBit_E20_0;
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
@@ -3703,11 +3706,6 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint16_t mask = ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
tlb_flush_by_mmuidx(cs, mask);
raw_write(env, ri, value);
}
@@ -4178,11 +4176,6 @@ static int vae1_tlbmask(CPUARMState *env)
ARMMMUIdxBit_E10_1_PAN |
ARMMMUIdxBit_E10_0;
}
-
- if (arm_is_secure_below_el3(env)) {
- mask >>= ARM_MMU_IDX_A_NS;
- }
-
return mask;
}
@@ -4209,10 +4202,6 @@ static int vae1_tlbbits(CPUARMState *env, uint64_t addr)
mmu_idx = ARMMMUIdx_E10_0;
}
- if (arm_is_secure_below_el3(env)) {
- mmu_idx &= ~ARM_MMU_IDX_A_NS;
- }
-
return tlbbits_for_regime(env, mmu_idx, addr);
}
@@ -4245,30 +4234,17 @@ static int alle1_tlbmask(CPUARMState *env)
* stage 2 translations, whereas most other scopes only invalidate
* stage 1 translations.
*/
- if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE10_1 |
- ARMMMUIdxBit_SE10_1_PAN |
- ARMMMUIdxBit_SE10_0;
- } else {
- return ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E10_1_PAN |
- ARMMMUIdxBit_E10_0;
- }
+ return (ARMMMUIdxBit_E10_1 |
+ ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_0);
}
static int e2_tlbmask(CPUARMState *env)
{
- if (arm_is_secure_below_el3(env)) {
- return ARMMMUIdxBit_SE20_0 |
- ARMMMUIdxBit_SE20_2 |
- ARMMMUIdxBit_SE20_2_PAN |
- ARMMMUIdxBit_SE2;
- } else {
- return ARMMMUIdxBit_E20_0 |
- ARMMMUIdxBit_E20_2 |
- ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2;
- }
+ return (ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_2 |
+ ARMMMUIdxBit_E20_2_PAN |
+ ARMMMUIdxBit_E2);
}
static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4295,7 +4271,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
CPUState *cs = CPU(cpu);
- tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_SE3);
+ tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4321,7 +4297,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
- tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_SE3);
+ tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4349,7 +4325,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_SE3);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3);
}
static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4388,12 +4364,10 @@ static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- bool secure = arm_is_secure_below_el3(env);
- int mask = secure ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2;
- int bits = tlbbits_for_regime(env, secure ? ARMMMUIdx_SE2 : ARMMMUIdx_E2,
- pageaddr);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr);
- tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits);
+ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
+ ARMMMUIdxBit_E2, bits);
}
static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4401,10 +4375,10 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
CPUState *cs = env_cpu(env);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- int bits = tlbbits_for_regime(env, ARMMMUIdx_SE3, pageaddr);
+ int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr);
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr,
- ARMMMUIdxBit_SE3, bits);
+ ARMMMUIdxBit_E3, bits);
}
#ifdef TARGET_AARCH64
@@ -4510,8 +4484,7 @@ static void tlbi_aa64_rvae1is_write(CPUARMState *env,
static int vae2_tlbmask(CPUARMState *env)
{
- return (arm_is_secure_below_el3(env)
- ? ARMMMUIdxBit_SE2 : ARMMMUIdxBit_E2);
+ return ARMMMUIdxBit_E2;
}
static void tlbi_aa64_rvae2_write(CPUARMState *env,
@@ -4557,8 +4530,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env,
* flush-last-level-only.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_SE3,
- tlb_force_broadcast(env));
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env));
}
static void tlbi_aa64_rvae3is_write(CPUARMState *env,
@@ -4572,7 +4544,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env,
* flush-last-level-only or inner/outer specific flushes.
*/
- do_rvae_write(env, value, ARMMMUIdxBit_SE3, true);
+ do_rvae_write(env, value, ARMMMUIdxBit_E3, true);
}
#endif
@@ -10087,8 +10059,7 @@ uint64_t arm_sctlr(CPUARMState *env, int el)
/* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */
if (el == 0) {
ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
- el = (mmu_idx == ARMMMUIdx_E20_0 || mmu_idx == ARMMMUIdx_SE20_0)
- ? 2 : 1;
+ el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;
}
return env->cp15.sctlr_el[el];
}
@@ -10632,22 +10603,15 @@ int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E20_0:
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_SE20_0:
return 0;
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
return 1;
case ARMMMUIdx_E2:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE2:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
return 2;
- case ARMMMUIdx_SE3:
+ case ARMMMUIdx_E3:
return 3;
default:
g_assert_not_reached();
@@ -10700,15 +10664,11 @@ ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el)
}
break;
case 3:
- return ARMMMUIdx_SE3;
+ return ARMMMUIdx_E3;
default:
g_assert_not_reached();
}
- if (arm_is_secure_below_el3(env)) {
- idx &= ~ARM_MMU_IDX_A_NS;
- }
-
return idx;
}
@@ -10911,15 +10871,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
switch (mmu_idx) {
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
/* TODO: ARMv8.3-NV */
DP_TBFLAG_A64(flags, UNPRIV, 1);
break;
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
/*
* Note that EL20_2 is gated by HCR_EL2.E2H == 1, but EL20_0 is
* gated by HCR_EL2.<E2H,TGE> == '11', and so is LDTR.
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -65,12 +65,6 @@ unsigned int arm_pamax(ARMCPU *cpu)
ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
- case ARMMMUIdx_SE10_0:
- return ARMMMUIdx_Stage1_SE0;
- case ARMMMUIdx_SE10_1:
- return ARMMMUIdx_Stage1_SE1;
- case ARMMMUIdx_SE10_1_PAN:
- return ARMMMUIdx_Stage1_SE1_PAN;
case ARMMMUIdx_E10_0:
return ARMMMUIdx_Stage1_E0;
case ARMMMUIdx_E10_1:
@@ -95,11 +89,8 @@ static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
static bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
- case ARMMMUIdx_SE10_0:
case ARMMMUIdx_E20_0:
- case ARMMMUIdx_SE20_0:
case ARMMMUIdx_Stage1_E0:
- case ARMMMUIdx_Stage1_SE0:
case ARMMMUIdx_MUser:
case ARMMMUIdx_MSUser:
case ARMMMUIdx_MUserNegPri:
@@ -2323,7 +2314,7 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
}
s2_mmu_idx = (ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
- is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
+ is_el0 = mmu_idx == ARMMMUIdx_E10_0;
/*
* S1 is done, now do S2 translation.
@@ -2531,6 +2522,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
case ARMMMUIdx_E2:
+ is_secure = arm_is_secure_below_el3(env);
+ break;
case ARMMMUIdx_Stage2:
case ARMMMUIdx_MPrivNegPri:
case ARMMMUIdx_MUserNegPri:
@@ -2538,17 +2531,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
case ARMMMUIdx_MUser:
is_secure = false;
break;
- case ARMMMUIdx_SE3:
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
- case ARMMMUIdx_SE20_0:
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
- case ARMMMUIdx_Stage1_SE0:
- case ARMMMUIdx_Stage1_SE1:
- case ARMMMUIdx_Stage1_SE1_PAN:
- case ARMMMUIdx_SE2:
+ case ARMMMUIdx_E3:
case ARMMMUIdx_Stage2_S:
case ARMMMUIdx_MSPrivNegPri:
case ARMMMUIdx_MSUserNegPri:
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -111,14 +111,6 @@ static int get_a64_user_mem_index(DisasContext *s)
case ARMMMUIdx_E20_2_PAN:
useridx = ARMMMUIdx_E20_0;
break;
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
- useridx = ARMMMUIdx_SE10_0;
- break;
- case ARMMMUIdx_SE20_2:
- case ARMMMUIdx_SE20_2_PAN:
- useridx = ARMMMUIdx_SE20_0;
- break;
default:
g_assert_not_reached();
}
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -237,16 +237,12 @@ static inline int get_a32_user_mem_index(DisasContext *s)
* otherwise, access as if at PL0.
*/
switch (s->mmu_idx) {
+ case ARMMMUIdx_E3:
case ARMMMUIdx_E2: /* this one is UNPREDICTABLE */
case ARMMMUIdx_E10_0:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
- case ARMMMUIdx_SE3:
- case ARMMMUIdx_SE10_0:
- case ARMMMUIdx_SE10_1:
- case ARMMMUIdx_SE10_1_PAN:
- return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
case ARMMMUIdx_MUser:
case ARMMMUIdx_MPriv:
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
For a-profile, which does not bank system registers, it takes
quite a lot of code to switch between security states.  In the
process, registers such as TCR_EL{1,2} must be swapped, which in
itself requires the flushing of softmmu tlbs.  Therefore it
doesn't buy us anything to separate tlbs by security state.

Retain the distinction between Stage2 and Stage2_S.

This will be important as we implement FEAT_RME, and do not
wish to add a third set of mmu indexes for Realm state.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu-param.h     |   2 +-
 target/arm/cpu.h           |  69 +++++++-----------
 target/arm/internals.h     |  31 +-------
 target/arm/helper.c        | 144 +++++++++++++------------------
 target/arm/ptw.c           |  25 ++-----
 target/arm/translate-a64.c |   8 ---
 target/arm/translate.c     |   6 +-
 7 files changed, 83 insertions(+), 202 deletions(-)
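
As a rough sanity check of the argument above, here is a small standalone
toy model in C (not QEMU code; the TOY_* names and helper functions are
invented purely for illustration). It assumes, as the commit message
argues, that any change of SCR_EL3.NS already flushes the TLBs for every
regime below EL3; under that assumption no TLB entry can survive a
security-state switch, so tagging the below-EL3 indexes with the security
state cannot preserve anything and only doubles the number of TLBs.

#include <stdbool.h>
#include <stdio.h>

/* Toy translation regimes, loosely mirroring the folded A-profile set. */
enum { TOY_E10_0, TOY_E10_1, TOY_E2, TOY_E3, TOY_NB_MMU_MODES };

/* One "TLB" per mmu index, modelled here as just a count of cached entries. */
static int tlb_entries[TOY_NB_MMU_MODES];
static bool scr_ns;                     /* toy SCR_EL3.NS */

static void tlb_flush_below_el3(void)
{
    /* Invalidate every regime except EL3 itself. */
    for (int i = 0; i < TOY_NB_MMU_MODES; i++) {
        if (i != TOY_E3) {
            tlb_entries[i] = 0;
        }
    }
}

/* Writing SCR_EL3.NS switches the security state seen below EL3. */
static void toy_write_scr_ns(bool ns)
{
    if (ns != scr_ns) {
        scr_ns = ns;
        /*
         * The switch already flushes all lower regimes, so no entry
         * cached while Secure can ever be (wrongly) hit while
         * Non-secure, and vice versa - per-security-state mmu indexes
         * would never retain anything across this point.
         */
        tlb_flush_below_el3();
    }
}

int main(void)
{
    tlb_entries[TOY_E10_1] = 42;        /* pretend EL1 cached some entries */
    toy_write_scr_ns(true);             /* Secure -> Non-secure */
    printf("EL1&0 entries after NS switch: %d\n", tlb_entries[TOY_E10_1]);
    return 0;
}

Compiled with any C11 compiler, this prints 0 cached entries after the
switch, which is the point: separate Secure copies of the below-EL3 TLBs
could never be reused, they would only cost memory.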