@@ -1032,6 +1032,7 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
*/
typedef struct ARMVAParameters {
unsigned tsz : 8;
+ unsigned ps : 3;
unsigned select : 1;
bool tbi : 1;
bool epd : 1;
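
TCR_ELx.{I}PS is a 3-bit field, so three bits are enough to carry any of the architected encodings 0..6 through the parameter struct unmodified. A minimal standalone sketch, using a trimmed hypothetical stand-in for ARMVAParameters rather than the real QEMU definition:

    #include <assert.h>

    /*
     * Trimmed, hypothetical stand-in for ARMVAParameters -- only the
     * fields relevant here.  The 3-bit ps member carries the raw
     * TCR_ELx.{I}PS encoding, which is bounded against PARANGE later.
     */
    typedef struct {
        unsigned tsz : 8;
        unsigned ps : 3;
        unsigned select : 1;
    } VAParamsSketch;

    int main(void)
    {
        /* All architected {I}PS encodings (0..6) round-trip unchanged. */
        for (unsigned enc = 0; enc <= 6; enc++) {
            VAParamsSketch p = { .tsz = 25, .ps = enc, .select = 0 };
            assert(p.ps == enc);
        }
        return 0;
    }
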
@@ -11147,17 +11147,19 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
}
#endif /* !CONFIG_USER_ONLY */

+/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
+static const uint8_t pamax_map[] = {
+ [0] = 32,
+ [1] = 36,
+ [2] = 40,
+ [3] = 42,
+ [4] = 44,
+ [5] = 48,
+};
+
/* The cpu-specific constant value of PAMax; also used by hw/arm/virt. */
unsigned int arm_pamax(ARMCPU *cpu)
{
- static const unsigned int pamax_map[] = {
- [0] = 32,
- [1] = 36,
- [2] = 40,
- [3] = 42,
- [4] = 44,
- [5] = 48,
- };
unsigned int parange =
FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
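
The table above is shared because ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS use the same encoding of physical-address sizes. A self-contained sketch of the lookup that arm_pamax() now performs, with pa_size_map and ARRAY_LEN as local stand-ins for the QEMU names:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same encoding used by ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
    static const uint8_t pa_size_map[] = { 32, 36, 40, 42, 44, 48 };

    #define ARRAY_LEN(x) (sizeof(x) / sizeof((x)[0]))

    int main(void)
    {
        /* PARANGE == 5 advertises a 48-bit physical address space. */
        unsigned parange = 5;
        assert(parange < ARRAY_LEN(pa_size_map));
        printf("PAMax = %u bits\n", (unsigned)pa_size_map[parange]); /* 48 */
        return 0;
    }
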
@@ -11208,7 +11210,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
{
uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr;
bool epd, hpd, using16k, using64k, tsz_oob;
- int select, tsz, tbi, max_tsz, min_tsz;
+ int select, tsz, tbi, max_tsz, min_tsz, ps;

if (!regime_has_2_ranges(mmu_idx)) {
select = 0;
@@ -11222,6 +11224,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
hpd = extract32(tcr, 24, 1);
}
epd = false;
+ ps = extract32(tcr, 16, 3);
} else {
/*
* Bit 55 is always between the two regions, and is canonical for
@@ -11242,6 +11245,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
epd = extract32(tcr, 23, 1);
hpd = extract64(tcr, 42, 1);
}
+ ps = extract64(tcr, 32, 3);
}

if (cpu_isar_feature(aa64_st, env_archcpu(env))) {
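
The two extractions added above read the same kind of field from different positions: TCR_EL2/TCR_EL3 (and VTCR_EL2) keep PS at bits [18:16], while TCR_EL1 keeps IPS at bits [34:32]. A standalone sketch of that bit-slicing, with extract_bits() as a local stand-in for QEMU's extract32()/extract64() and made-up register values:

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's extract64(): take 'len' bits at 'start'. */
    static uint64_t extract_bits(uint64_t value, unsigned start, unsigned len)
    {
        return (value >> start) & ((1ull << len) - 1);
    }

    int main(void)
    {
        /* Hypothetical TCR_EL3 value with PS (bits [18:16]) = 0b101, 48-bit PA. */
        uint64_t tcr_el3 = 5ull << 16;
        assert(extract_bits(tcr_el3, 16, 3) == 5);

        /* Hypothetical TCR_EL1 value with IPS (bits [34:32]) = 0b010, 40-bit PA. */
        uint64_t tcr_el1 = 2ull << 32;
        assert(extract_bits(tcr_el1, 32, 3) == 2);
        return 0;
    }
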
@@ -11270,6 +11274,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
return (ARMVAParameters) {
.tsz = tsz,
+ .ps = ps,
.select = select,
.tbi = tbi,
.epd = epd,
@@ -11397,6 +11402,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* TODO: This code does not support shareability levels. */
if (aarch64) {
+ int ps;
+
param = aa64_va_parameters(env, address, mmu_idx,
access_type != MMU_INST_FETCH);
level = 0;
@@ -11417,7 +11424,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
addrsize = 64 - 8 * param.tbi;
inputsize = 64 - param.tsz;
- outputsize = arm_pamax(cpu);
+
+ /*
+ * Bound PS by PARANGE to find the effective output address size.
+ * ID_AA64MMFR0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error.
+ */
+ ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ ps = MIN(ps, param.ps);
+ assert(ps < ARRAY_SIZE(pamax_map));
+ outputsize = pamax_map[ps];
} else {
param = aa32_va_parameters(env, address, mmu_idx);
level = 1;
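
The net effect of the hunk above is outputsize = pamax_map[MIN(PARANGE, PS)]: software cannot select, via TCR_ELx, a larger output size than the CPU implements, and a PARANGE value outside the table would trip the assert. A self-contained sketch of that clamping, under assumed helper names that are not QEMU's:

    #include <assert.h>
    #include <stdint.h>

    static const uint8_t pa_size_map[] = { 32, 36, 40, 42, 44, 48 };

    #define MIN_U(a, b) ((a) < (b) ? (a) : (b))

    /* Effective output size: TCR_ELx.{I}PS bounded by ID_AA64MMFR0.PARANGE. */
    static unsigned effective_output_size(unsigned parange, unsigned ps)
    {
        unsigned enc = MIN_U(parange, ps);
        assert(enc < sizeof(pa_size_map));
        return pa_size_map[enc];
    }

    int main(void)
    {
        /* CPU implements 44-bit PAs, software asks for 48-bit: clamped to 44. */
        assert(effective_output_size(4, 5) == 44);
        /* Software asks for less than the CPU supports: the smaller value wins. */
        assert(effective_output_size(5, 2) == 40);
        return 0;
    }
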
@@ -11521,19 +11537,38 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
+
+ /*
+ * If the base address is out of range, raise AddressSizeFault.
+ * In the pseudocode, this is !IsZero(baseregister<47:outputsize>),
+ * but we've just cleared the bits above 47, so simplify the test.
+ */
+ if (descaddr >> outputsize) {
+ level = 0;
+ fault_type = ARMFault_AddressSize;
+ goto do_fault;
+ }
+
/*
* We rely on this masking to clear the RES0 bits at the bottom of the TTBR
* and also to mask out CnP (bit 0) which could validly be non-zero.
*/
descaddr &= ~indexmask;
- /* The address field in the descriptor goes up to bit 39 for ARMv7
- * but up to bit 47 for ARMv8, but we use the descaddrmask
- * up to bit 39 for AArch32, because we don't need other bits in that case
- * to construct next descriptor address (anyway they should be all zeroes).
+ /*
+ * For AArch32, the address field in the descriptor goes up to bit 39
+ * for both v7 and v8. However, for v8 the SBZ bits [47:40] must be 0
+ * or an AddressSize fault is raised. So for v8 we extract those SBZ
+ * bits as part of the address, which will be checked via outputsize.
+ * For AArch64, the address field always goes up to bit 47 (with extra
+ * bits for FEAT_LPA placed elsewhere). AArch64 implies v8.
*/
- descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
- ~indexmask_grainsize;
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ descaddrmask = MAKE_64BIT_MASK(0, 48);
+ } else {
+ descaddrmask = MAKE_64BIT_MASK(0, 40);
+ }
+ descaddrmask &= ~indexmask_grainsize;
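
The v8 mask keeps bits [47:40] so that non-zero SBZ bits in a descriptor survive to be caught by the later outputsize comparison, while the v7 mask drops them up front. A self-contained sketch of that difference (make_mask() is a local stand-in for QEMU's MAKE_64BIT_MASK(); the grain-size index masking is omitted and the values are made up):

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's MAKE_64BIT_MASK(shift, length). */
    static uint64_t make_mask(unsigned shift, unsigned length)
    {
        uint64_t m = (length >= 64) ? ~0ull : (1ull << length) - 1;
        return m << shift;
    }

    int main(void)
    {
        /* SBZ bit 45 set on top of an otherwise valid descriptor address. */
        uint64_t descriptor = (1ull << 45) | 0xff000;

        /* v8: keep bits [47:0]; the stray bit 45 survives the mask... */
        uint64_t v8_addr = descriptor & make_mask(0, 48);
        assert(v8_addr >> 40);      /* ...and will trip the outputsize check. */

        /* v7: only bits [39:0] are address bits; bit 45 is silently dropped. */
        uint64_t v7_addr = descriptor & make_mask(0, 40);
        assert(!(v7_addr >> 40));
        return 0;
    }
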
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
@@ -11558,7 +11593,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
/* Invalid, or the Reserved level 3 encoding */
goto do_fault;
}
+
descaddr = descriptor & descaddrmask;
+ if (descaddr >> outputsize) {
+ fault_type = ARMFault_AddressSize;
+ goto do_fault;
+ }
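
The descaddr >> outputsize test above condenses the pseudocode's !IsZero(address<47:outputsize>) into a single shift, which is valid because bits above 47 have already been masked off. A minimal sketch of that equivalence, with hypothetical helper names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Literal transcription of !IsZero(addr<47:outputsize>). */
    static bool pseudocode_check(uint64_t addr, unsigned outputsize)
    {
        uint64_t field = (addr >> outputsize) & ((1ull << (48 - outputsize)) - 1);
        return field != 0;
    }

    /* Simplified form from the patch: valid once addr has no bits above 47. */
    static bool simplified_check(uint64_t addr, unsigned outputsize)
    {
        return (addr >> outputsize) != 0;
    }

    int main(void)
    {
        /* An address with bit 45 set faults for a 40-bit output size... */
        uint64_t bad = 1ull << 45;
        assert(pseudocode_check(bad, 40) == simplified_check(bad, 40));
        assert(simplified_check(bad, 40));

        /* ...while an in-range address passes both forms. */
        uint64_t ok = 0xfffff000ull;
        assert(pseudocode_check(ok, 40) == simplified_check(ok, 40));
        assert(!simplified_check(ok, 40));
        return 0;
    }
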
if ((descriptor & 2) && (level < 3)) {
/* Table entry. The top five bits are attributes which may