@@ -30,6 +30,7 @@ the following architecture extensions:
- FEAT_FRINTTS (Floating-point to integer instructions)
- FEAT_FlagM (Flag manipulation instructions v2)
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
+- FEAT_HAFDBS (Hardware management of the access flag and dirty bit state)
- FEAT_HCX (Support for the HCRX_EL2 register)
- FEAT_HPDS (Hierarchical permission disables)
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
@@ -1037,6 +1037,7 @@ static void aarch64_max_initfn(Object *obj)
cpu->isar.id_aa64mmfr0 = t;
t = cpu->isar.id_aa64mmfr1;
+ t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
@@ -193,6 +193,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
typedef struct {
bool is_secure;
bool be;
+ bool rw;
void *hphys;
hwaddr gphys;
} S1TranslateResult;
@@ -221,6 +222,8 @@ static bool S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
return false;
}
+ res->rw = full->prot & PAGE_WRITE;
+
if (s2_mmu_idx == ARMMMUIdx_Stage2 || s2_mmu_idx == ARMMMUIdx_Stage2_S) {
uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
uint8_t s2attrs = full->pte_attrs;
@@ -333,6 +336,56 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, const S1TranslateResult *s1,
return data;
}
+static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
+ uint64_t new_val, const S1TranslateResult *s1,
+ ARMMMUFaultInfo *fi)
+{
+ uint64_t cur_val;
+
+ if (unlikely(!s1->hphys)) {
+ fi->type = ARMFault_UnsuppAtomicUpdate;
+ fi->s1ptw = true;
+ return 0;
+ }
+
+#ifndef CONFIG_ATOMIC64
+ /*
+ * The host cannot perform the 64-bit atomic cmpxchg. Without
+ * CONFIG_ATOMIC64 we should be running in round-robin mode, so
+ * the only possible race is with DMA I/O.
+ */
+ qemu_mutex_lock_iothread();
+ if (s1->be) {
+ cur_val = ldq_be_p(s1->hphys);
+ if (cur_val == old_val) {
+ stq_be_p(s1->hphys, new_val);
+ }
+ } else {
+ cur_val = ldq_le_p(s1->hphys);
+ if (cur_val == old_val) {
+ stq_le_p(s1->hphys, new_val);
+ }
+ }
+ qemu_mutex_unlock_iothread();
+#else
+ if (s1->be) {
+ old_val = cpu_to_be64(old_val);
+ new_val = cpu_to_be64(new_val);
+ cur_val = qatomic_cmpxchg__nocheck((uint64_t *)s1->hphys,
+ old_val, new_val);
+ cur_val = be64_to_cpu(cur_val);
+ } else {
+ old_val = cpu_to_le64(old_val);
+ new_val = cpu_to_le64(new_val);
+ cur_val = qatomic_cmpxchg__nocheck((uint64_t *)s1->hphys,
+ old_val, new_val);
+ cur_val = le64_to_cpu(cur_val);
+ }
+#endif
+
+ return cur_val;
+}
+
static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t *table, uint32_t address)
{
@@ -1240,6 +1293,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
goto do_fault;
}
+ restart_atomic_update:
if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
goto do_translation_fault;
@@ -1317,8 +1371,26 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
if ((attrs & (1 << 10)) == 0) {
/* Access flag */
- fi->type = ARMFault_AccessFlag;
- goto do_fault;
+ uint64_t new_des, old_des;
+
+ /*
+ * If hardware Access flag updates (HA) are disabled, or the
+ * page-table memory itself is not writable, deliver the
+ * access fault to software instead.
+ */
+ if (!param.ha || !s1.rw) {
+ fi->type = ARMFault_AccessFlag;
+ goto do_fault;
+ }
+
+ old_des = descriptor;
+ new_des = descriptor | (1 << 10); /* AF */
+ descriptor = arm_casq_ptw(env, old_des, new_des, &s1, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ if (old_des != descriptor) {
+ goto restart_atomic_update;
+ }
}
ap = extract32(attrs, 6, 2);
@@ -1335,8 +1407,43 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
}
if (!(result->f.prot & (1 << access_type))) {
- fi->type = ARMFault_Permission;
- goto do_fault;
+ uint64_t new_des, old_des;
+
+ /* Writes may set dirty if DBM attribute is set. */
+ if (!param.hd
+ || access_type != MMU_DATA_STORE
+ || !extract64(attrs, 51, 1) /* DBM */
+ || !s1.rw) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ old_des = descriptor;
+ if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+ new_des = descriptor | (1ull << 7); /* S2AP[1] */
+ } else {
+ new_des = descriptor & ~(1ull << 7); /* AP[2] */
+ }
+
+ /*
+ * If the descriptor didn't change, then attributes weren't the
+ * reason for the permission fault, so deliver it.
+ */
+ if (old_des == new_des) {
+ fi->type = ARMFault_Permission;
+ goto do_fault;
+ }
+
+ descriptor = arm_casq_ptw(env, old_des, new_des, &s1, fi);
+ if (fi->type != ARMFault_None) {
+ goto do_fault;
+ }
+ if (old_des != descriptor) {
+ goto restart_atomic_update;
+ }
+
+ /* Success: the page is now writable. */
+ result->f.prot |= 1 << MMU_DATA_STORE;
}
if (ns) {
Perform the atomic update for hardware management of the access flag and the dirty bit. A limitation of the implementation so far is that the page table itself must already be writable, i.e. the dirty bit for the stage2 page table must already be set, i.e. we cannot set both dirty bits at the same time. This is allowed because it is CONSTRAINED UNPREDICTABLE whether any atomic update happens at all. The implementation is allowed to simply fall back on software update at any time. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- docs/system/arm/emulation.rst | 1 + target/arm/cpu64.c | 1 + target/arm/ptw.c | 115 ++++++++++++++++++++++++++++++++-- 3 files changed, 113 insertions(+), 4 deletions(-)