@@ -135,9 +135,21 @@ typedef struct CPUTLBEntryFull {
* This may be used to cache items from the guest cpu
* page tables for later use by the implementation.
*/
-#ifdef TARGET_PAGE_ENTRY_EXTRA
- TARGET_PAGE_ENTRY_EXTRA
-#endif
+ union {
+ /*
+ * Cache the attrs and shareability fields from the page table entry.
+ *
+ * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
+ * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
+ * The shareability and guarded fields are as in the SH and GP
+ * fields, respectively, of the VMSAv8-64 PTEs.
+ */
+ struct {
+ uint8_t pte_attrs;
+ uint8_t shareability;
+ bool guarded;
+ } arm;
+ } extra;
} CPUTLBEntryFull;
#endif /* CONFIG_SOFTMMU */
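For context, a minimal standalone sketch (not part of the patch) of how the new per-target union is meant to be used: the target fills in its member of `extra` at TLB-fill time and the helpers read it back later. The pared-down ToyTLBEntryFull type below only illustrates the layout and is an assumption for this sketch, not the real CPUTLBEntryFull.

/* Standalone sketch only: ToyTLBEntryFull is a pared-down stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct ToyTLBEntryFull {
    uint64_t phys_addr;               /* one of the real struct's fields */
    union {
        struct {
            uint8_t pte_attrs;        /* MAIR-format attrs, or S2 bits [5:2] */
            uint8_t shareability;     /* SH field of the VMSAv8-64 descriptor */
            bool guarded;             /* GP bit, consumed by BTI */
        } arm;
        /* another target would add its own member here */
    } extra;
} ToyTLBEntryFull;

int main(void)
{
    ToyTLBEntryFull full = { .phys_addr = 0x40000000 };

    /* Fill side (cf. arm_cpu_tlb_fill below): cache descriptor attributes. */
    full.extra.arm.pte_attrs = 0xf0;      /* Tagged Normal memory */
    full.extra.arm.shareability = 3;      /* Inner Shareable */
    full.extra.arm.guarded = true;

    /* Lookup side (cf. S1_ptw_translate, is_guarded_page): read them back. */
    printf("attrs=0x%02x sh=%d gp=%d\n",
           full.extra.arm.pte_attrs,
           full.extra.arm.shareability,
           full.extra.arm.guarded);
    return 0;
}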
@@ -31,18 +31,6 @@
# define TARGET_PAGE_BITS_VARY
# define TARGET_PAGE_BITS_MIN 10
-/*
- * Cache the attrs and shareability fields from the page table entry.
- *
- * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
- * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
- * For shareability and guarded, as in the SH and GP fields respectively
- * of the VMSAv8-64 PTEs.
- */
-# define TARGET_PAGE_ENTRY_EXTRA \
- uint8_t pte_attrs; \
- uint8_t shareability; \
- bool guarded;
#endif
#endif
@@ -579,7 +579,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
}
ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
ptw->out_rw = full->prot & PAGE_WRITE;
- pte_attrs = full->pte_attrs;
+ pte_attrs = full->extra.arm.pte_attrs;
ptw->out_space = full->attrs.space;
#else
g_assert_not_reached();
@@ -2036,7 +2036,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
/* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
- result->f.guarded = extract64(attrs, 50, 1); /* GP */
+ result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
}
}
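Here extract64(attrs, 50, 1) reads the GP bit, bit 50 of the upper descriptor attributes. A standalone sketch of that extraction follows; extract64_sketch is a simplified local stand-in for QEMU's extract64() helper, and the attrs value is made up for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for QEMU's extract64(value, start, length). */
static uint64_t extract64_sketch(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* Made-up descriptor attributes with only the GP bit (bit 50) set. */
    uint64_t attrs = 1ULL << 50;
    bool guarded = extract64_sketch(attrs, 50, 1);   /* GP */

    printf("guarded=%d\n", guarded);                 /* prints 1 */
    return 0;
}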
@@ -137,7 +137,7 @@ static uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
assert(!(flags & TLB_INVALID_MASK));
/* If the virtual page MemAttr != Tagged, access unchecked. */
- if (full->pte_attrs != 0xf0) {
+ if (full->extra.arm.pte_attrs != 0xf0) {
return NULL;
}
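The 0xf0 compared against here is the MAIR attribute encoding for Tagged Normal Write-Back memory; only pages whose cached pte_attrs carry that encoding are subject to MTE tag checking. A small standalone sketch of the same test, with the constant given a descriptive name of my own (the patch itself compares against the literal):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MAIR encoding for Tagged Normal, Inner/Outer Write-Back memory
 * (name chosen here for clarity; the code above uses 0xf0 directly). */
#define MAIR_ATTR_TAGGED_NORMAL_WB 0xf0

/* Mirrors the check above: anything else is accessed unchecked by MTE. */
static bool page_is_mte_tagged(uint8_t pte_attrs)
{
    return pte_attrs == MAIR_ATTR_TAGGED_NORMAL_WB;
}

int main(void)
{
    printf("0xf0 tagged? %d\n", page_is_mte_tagged(0xf0));  /* 1 */
    printf("0xff tagged? %d\n", page_is_mte_tagged(0xff));  /* 0 */
    return 0;
}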
@@ -5373,7 +5373,7 @@ bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
info->tagged = (flags & PAGE_ANON) && (flags & PAGE_MTE);
#else
info->attrs = full->attrs;
- info->tagged = full->pte_attrs == 0xf0;
+ info->tagged = full->extra.arm.pte_attrs == 0xf0;
#endif
/* Ensure that info->host[] is relative to addr, not addr + mem_off. */
@@ -334,8 +334,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
address &= TARGET_PAGE_MASK;
}
- res.f.pte_attrs = res.cacheattrs.attrs;
- res.f.shareability = res.cacheattrs.shareability;
+ res.f.extra.arm.pte_attrs = res.cacheattrs.attrs;
+ res.f.extra.arm.shareability = res.cacheattrs.shareability;
tlb_set_page_full(cs, mmu_idx, address, &res.f);
return true;
@@ -13904,7 +13904,7 @@ static bool is_guarded_page(CPUARMState *env, DisasContext *s)
false, &host, &full, 0);
assert(!(flags & TLB_INVALID_MASK));
- return full->guarded;
+ return full->extra.arm.guarded;
#endif
}