@@ -3393,6 +3393,19 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno)
 /* Shared between translate-sve.c and sve_helper.c. */
 extern const uint64_t pred_esz_masks[4];
 
+/* Helper for the macros below, validating the argument type. */
+static inline MemTxAttrs *typecheck_memtxattrs(MemTxAttrs *x)
+{
+    return x;
+}
+
+/*
+ * Lvalue macros for ARM TLB bits that we must cache in the TCG TLB.
+ * Using these should be a bit more self-documenting than using the
+ * generic target bits directly.
+ */
+#define arm_tlb_bti_gp(x) (typecheck_memtxattrs(x)->target_tlb_bit0)
+
 /*
  * Naming convention for isar_feature functions:
  * Functions which test 32-bit ID registers should have _aa32_ in
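
The indirection through typecheck_memtxattrs() is what makes the macro safe: a plain cast would silently accept any pointer, while a call through the inline identity function is type-checked by the compiler, and since the expansion is still a bare member access it remains usable as an lvalue. Below is a minimal sketch of the pattern outside QEMU, with a hypothetical struct Attrs and the helper names typecheck_attrs/attrs_bit0 standing in for MemTxAttrs and the real macro:

#include <stdbool.h>
#include <stdio.h>

typedef struct Attrs {
    unsigned int bit0 : 1;
} Attrs;

/* Identity function: exists only so the compiler checks the argument type. */
static inline Attrs *typecheck_attrs(Attrs *x)
{
    return x;
}

#define attrs_bit0(x)   (typecheck_attrs(x)->bit0)

int main(void)
{
    Attrs a = { 0 };

    attrs_bit0(&a) = true;              /* the expansion is an lvalue */
    printf("%d\n", attrs_bit0(&a));     /* and an rvalue: prints 1 */

    /* Passing any other pointer type fails to compile, unlike a cast. */
    return 0;
}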
@@ -11079,7 +11079,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        txattrs->target_tlb_bit0 = true;
+        arm_tlb_bti_gp(txattrs) = true;
     }
 
     if (cacheattrs != NULL) {
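
For context, here is a sketch (not QEMU's code) of the shape of this call site: the stage-1 walker extracts the GP bit, bit 50 of the VMSAv8-64 block/page descriptor, and when BTI is implemented latches it into the attributes that the TCG TLB will cache. record_guarded_page, have_bti, and the trimmed-down Attrs are illustrative stand-ins:

#include <stdbool.h>
#include <stdint.h>

typedef struct Attrs {
    unsigned int target_tlb_bit0 : 1;
} Attrs;

/* Simplified: the typecheck helper from cpu.h is omitted here. */
#define arm_tlb_bti_gp(x)   ((x)->target_tlb_bit0)

static void record_guarded_page(Attrs *txattrs, uint64_t descriptor,
                                bool aarch64, bool have_bti)
{
    /* GP lives in bit 50 of the stage-1 page/block descriptor. */
    bool guarded = (descriptor >> 50) & 1;

    if (aarch64 && guarded && have_bti) {
        arm_tlb_bti_gp(txattrs) = true;
    }
}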
@@ -14434,7 +14434,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
      * table entry even for that case.
      */
     return (tlb_hit(entry->addr_code, addr) &&
-            env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0);
+            arm_tlb_bti_gp(&env_tlb(env)->d[mmu_idx].iotlb[index].attrs));
 #endif
 }
 
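
With the bit cached at fill time, the read side costs only a TLB lookup instead of a fresh page-table walk. A self-contained sketch of that idea, with the TlbEntry type and the simplified tlb_hit() standing in for QEMU's env_tlb()/IOTLB machinery:

#include <stdbool.h>
#include <stdint.h>

typedef struct Attrs {
    unsigned int target_tlb_bit0 : 1;
} Attrs;

#define arm_tlb_bti_gp(x)   ((x)->target_tlb_bit0)

typedef struct TlbEntry {
    uint64_t addr_code;   /* page-aligned tag for code fetches */
    Attrs attrs;          /* attributes captured when the entry was filled */
} TlbEntry;

/* Assume 4K pages for the sketch. */
static bool tlb_hit(uint64_t tag, uint64_t addr)
{
    return tag == (addr & ~(uint64_t)0xfff);
}

static bool is_guarded_page(TlbEntry *entry, uint64_t addr)
{
    /* Trust the cached GP bit only if the entry really maps addr. */
    return tlb_hit(entry->addr_code, addr) &&
           arm_tlb_bti_gp(&entry->attrs);
}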