@@ -673,6 +673,11 @@ static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
     }
 }
 
+static inline bool regime_is_stage2(ARMMMUIdx mmu_idx)
+{
+    return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
+}
+
 /* Return the exception level which controls this address translation regime */
 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
@@ -10352,7 +10352,7 @@ int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx)
 {
     if (regime_has_2_ranges(mmu_idx)) {
         return extract64(tcr, 37, 2);
-    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+    } else if (regime_is_stage2(mmu_idx)) {
         return 0; /* VTCR_EL2 */
     } else {
         /* Replicate the single TBI bit so we always have 2 bits. */
@@ -10364,7 +10364,7 @@ int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx)
 {
     if (regime_has_2_ranges(mmu_idx)) {
         return extract64(tcr, 51, 2);
-    } else if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+    } else if (regime_is_stage2(mmu_idx)) {
         return 0; /* VTCR_EL2 */
     } else {
         /* Replicate the single TBID bit so we always have 2 bits. */
@@ -10474,7 +10474,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
     ARMGranuleSize gran;
     ARMCPU *cpu = env_archcpu(env);
-    bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
+    bool stage2 = regime_is_stage2(mmu_idx);
 
     if (!regime_has_2_ranges(mmu_idx)) {
         select = 0;
@@ -10532,22 +10532,18 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         }
         ds = false;
     } else if (ds) {
-        switch (mmu_idx) {
-        case ARMMMUIdx_Stage2:
-        case ARMMMUIdx_Stage2_S:
+        if (regime_is_stage2(mmu_idx)) {
             if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu);
             }
-            break;
-        default:
+        } else {
             if (gran == Gran16K) {
                 ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu);
             } else {
                 ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu);
             }
-            break;
         }
         if (ds) {
             min_tsz = 12;
@@ -842,8 +842,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
     bool have_wxn;
     int wxn = 0;
 
-    assert(mmu_idx != ARMMMUIdx_Stage2);
-    assert(mmu_idx != ARMMMUIdx_Stage2_S);
+    assert(!regime_is_stage2(mmu_idx));
 
     user_rw = simple_ap_to_rw_prot_is_user(ap, true);
     if (is_user) {
@@ -1171,7 +1170,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         goto do_fault;
     }
 
-    if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+    if (!regime_is_stage2(mmu_idx)) {
         /*
          * The starting level depends on the virtual address size (which can
          * be up to 48 bits) and the translation granule size. It indicates
@@ -1342,7 +1341,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         attrs = extract64(descriptor, 2, 10)
             | (extract64(descriptor, 52, 12) << 10);
 
-        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+        if (regime_is_stage2(mmu_idx)) {
             /* Stage 2 table descriptors do not include any attribute fields */
             break;
         }
@@ -1374,7 +1373,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
 
     ap = extract32(attrs, 4, 2);
 
-    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+    if (regime_is_stage2(mmu_idx)) {
         ns = mmu_idx == ARMMMUIdx_Stage2;
         xn = extract32(attrs, 11, 2);
         result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
@@ -1404,7 +1403,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         result->f.guarded = guarded;
     }
 
-    if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+    if (regime_is_stage2(mmu_idx)) {
         result->cacheattrs.is_s2_format = true;
         result->cacheattrs.attrs = extract32(attrs, 0, 4);
     } else {
@@ -1435,8 +1434,7 @@ do_fault:
     fi->type = fault_type;
     fi->level = level;
     /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
-    fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_Stage2 ||
-                               mmu_idx == ARMMMUIdx_Stage2_S);
+    fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
     fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
     return true;
 }