diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -32,12 +32,12 @@
# define TARGET_PAGE_BITS_MIN 10
/*
- * Cache the attrs and sharability fields from the page table entry.
+ * Cache the attrs, shareability, and gp fields from the page table entry.
*/
# define TARGET_PAGE_ENTRY_EXTRA \
-    uint8_t pte_attrs;            \
-    uint8_t shareability;
-
+    uint8_t pte_attrs;            \
+    uint8_t shareability;         \
+    bool guarded;
#endif
#define NB_MMU_MODES 8
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3376,19 +3376,6 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
/* Shared between translate-sve.c and sve_helper.c. */
extern const uint64_t pred_esz_masks[5];
-/* Helper for the macros below, validating the argument type. */
-static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
-{
-    return x;
-}
-
-/*
- * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
- * Using these should be a bit more self-documenting than using the
- * generic target bits directly.
- */
-#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
-
/*
* AArch64 usage of the PAGE_TARGET_* bits for linux-user.
* Note that with the Linux kernel, PROT_MTE may not be cleared by mprotect
diff --git a/target/arm/internals.h b/target/arm/internals.h
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1067,6 +1067,7 @@ typedef struct ARMCacheAttrs {
unsigned int attrs:8;
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
bool is_s2_format:1;
+    bool guarded:1;              /* guarded bit of the v8-64 PTE */
} ARMCacheAttrs;
/* Fields that are valid upon success. */
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -1319,9 +1319,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
result->f.attrs.secure = false;
}
-    /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
-    if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(&result->f.attrs) = true;
+
+    /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+    if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+        result->f.guarded = guarded;
}
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14611,22 +14611,21 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
#ifdef CONFIG_USER_ONLY
return page_get_flags(addr) & PAGE_BTI;
#else
+    CPUTLBEntryFull *full;
+    void *host;
int mmu_idx = arm_to_core_mmu_idx(s->mmu_idx);
-    unsigned int index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+    int flags;
- /*
- * We test this immediately after reading an insn, which means
- * that any normal page must be in the TLB. The only exception
- * would be for executing from flash or device memory, which
- * does not retain the TLB entry.
- *
- * FIXME: Assume false for those, for now. We could use
- * arm_cpu_get_phys_page_attrs_debug to re-read the page
- * table entry even for that case.
- */
-    return (tlb_hit(entry->addr_code, addr) &&
-            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].fulltlb[index].attrs));
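+    /*
+     * We test this immediately after reading an insn, which means
+     * that the TLB entry must be present and valid, and thus this
+     * access is never going to fault.
+     */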
+    flags = probe_access_full(env, addr, MMU_INST_FETCH, mmu_idx,
+                              false, &host, &full, 0);
+    assert(!(flags & TLB_INVALID_MASK));
+
+    return full->guarded;
#endif
}
Add a field to TARGET_PAGE_ENTRY_EXTRA to hold the guarded bit.  In
is_guarded_page, use probe_access_full instead of just guessing that
the tlb entry is still present.  Also handles the FIXME about
executing from device memory.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu-param.h     |  8 ++++----
 target/arm/cpu.h           | 13 -------------
 target/arm/internals.h     |  1 +
 target/arm/ptw.c           |  7 ++++---
 target/arm/translate-a64.c | 27 +++++++++++++--------------
 5 files changed, 22 insertions(+), 34 deletions(-)
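For context on how the new field becomes visible to the translator:
TARGET_PAGE_ENTRY_EXTRA is expanded into the generic CPUTLBEntryFull
structure by the softmmu TLB code, and probe_access_full hands back a
pointer to the entry that was filled during the page table walk.  An
abridged sketch of that structure as of this series (field set from
include/exec/cpu-defs.h; comments mine, details elided, not part of
this patch):

    typedef struct CPUTLBEntryFull {
        /* Physical section number / address offset for the page. */
        hwaddr xlat_section;

        /* Memory transaction attributes from the page table walk. */
        MemTxAttrs attrs;

        /* Access permissions and page size of the translation. */
        uint8_t prot;
        uint8_t lg_page_size;

        /*
         * Per-target extra state; for Arm this expands to pte_attrs,
         * shareability, and, with this patch, guarded.
         */
    #ifdef TARGET_PAGE_ENTRY_EXTRA
        TARGET_PAGE_ENTRY_EXTRA
    #endif
    } CPUTLBEntryFull;

Because the bit is cached at TLB fill time, is_guarded_page no longer
depends on the entry still being resident in the fast-path TLB:
probe_access_full refills it on a miss, including for execution from
device memory, which is what retires the FIXME.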