@@ -4097,6 +4097,39 @@ static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
     return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
 }
 
+static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+}
+
+static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
+}
+
+static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
+}
+
+static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
+}
+
 static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
@@ -998,6 +998,15 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
     return valid;
 }
 
+/* Granule size (i.e. page size) */
+typedef enum ARMGranuleSize {
+    /* Same order as TG0 encoding */
+    Gran4K,
+    Gran64K,
+    Gran16K,
+    GranInvalid,
+} ARMGranuleSize;
+
 /*
  * Parameters of a given virtual address, as extracted from the
  * translation control register (TCR) for a given regime.
@@ -10287,20 +10287,107 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
     }
 }
 
+static ARMGranuleSize tg0_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 0:
+        return Gran4K;
+    case 1:
+        return Gran64K;
+    case 2:
+        return Gran16K;
+    default:
+        return GranInvalid;
+    }
+}
+
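+/* Decode TCR_ELx.TG1; note that TG1 and TG0 use different encodings */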
+static ARMGranuleSize tg1_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 1:
+        return Gran16K;
+    case 2:
+        return Gran4K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
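+/* Check whether the CPU supports a given granule size for this stage */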
+static inline bool have4k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
+        : cpu_isar_feature(aa64_tgran4, cpu);
+}
+
+static inline bool have16k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
+        : cpu_isar_feature(aa64_tgran16, cpu);
+}
+
+static inline bool have64k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
+        : cpu_isar_feature(aa64_tgran64, cpu);
+}
+
+static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
+                                         bool stage2)
+{
+    switch (gran) {
+    case Gran4K:
+        if (have4k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran16K:
+        if (have16k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran64K:
+        if (have64k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case GranInvalid:
+        break;
+    }
+    /*
+     * If the guest selects a granule size that isn't implemented,
+     * the architecture requires that we behave as if it selected one
+     * that is (with an IMPDEF choice of which one to pick). We choose
+     * to implement the smallest supported granule size.
+     */
+    if (have4k(cpu, stage2)) {
+        return Gran4K;
+    }
+    if (have16k(cpu, stage2)) {
+        return Gran16K;
+    }
+    assert(have64k(cpu, stage2));
+    return Gran64K;
+}
+
 ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                    ARMMMUIdx mmu_idx, bool data)
 {
     uint64_t tcr = regime_tcr(env, mmu_idx);
     bool epd, hpd, using16k, using64k, tsz_oob, ds;
     int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+    ARMGranuleSize gran;
     ARMCPU *cpu = env_archcpu(env);
+    bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;
 
     if (!regime_has_2_ranges(mmu_idx)) {
         select = 0;
         tsz = extract32(tcr, 0, 6);
-        using64k = extract32(tcr, 14, 1);
-        using16k = extract32(tcr, 15, 1);
-        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
+        if (stage2) {
             /* VTCR_EL2 */
             hpd = false;
         } else {
@@ -10318,16 +10405,13 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         select = extract64(va, 55, 1);
         if (!select) {
             tsz = extract32(tcr, 0, 6);
+            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
             epd = extract32(tcr, 7, 1);
             sh = extract32(tcr, 12, 2);
-            using64k = extract32(tcr, 14, 1);
-            using16k = extract32(tcr, 15, 1);
             hpd = extract64(tcr, 41, 1);
         } else {
-            int tg = extract32(tcr, 30, 2);
-            using16k = tg == 1;
-            using64k = tg == 3;
             tsz = extract32(tcr, 16, 6);
+            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
             epd = extract32(tcr, 23, 1);
             sh = extract32(tcr, 28, 2);
             hpd = extract64(tcr, 42, 1);
@@ -10336,6 +10420,10 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
         ds = extract64(tcr, 59, 1);
     }
 
+    gran = sanitize_gran_size(cpu, gran, stage2);
+    using64k = gran == Gran64K;
+    using16k = gran == Gran16K;
+
     if (cpu_isar_feature(aa64_st, cpu)) {
         max_tsz = 48 - using64k;
     } else {