@@ -439,4 +439,15 @@ static inline CPUTLB *env_tlb(CPUArchState *env)
return &env_neg(env)->tlb;
}
+/**
+ * cpu_tlb(cpu)
+ * @cpu: The generic CPUState
+ *
+ * Return the CPUTLB state associated with the cpu.
+ */
+static inline CPUTLB *cpu_tlb(CPUState *cpu)
+{
+ return &cpu_neg(cpu)->tlb;
+}
+
#endif /* CPU_ALL_H */
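
For context, a hedged sketch of why the new accessor helps (the helper below is illustrative, not part of the patch): code that holds only a CPUState, as much of accel/tcg does, can reach the TLB directly instead of first recovering CPUArchState for env_tlb():

/* Illustrative only: a caller with just a CPUState no longer needs
 * to recover CPUArchState before touching the TLB. */
static inline size_t example_used_entries(CPUState *cpu, int mmu_idx)
{
    return cpu_tlb(cpu)->d[mmu_idx].n_used_entries;
}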
@@ -169,6 +169,8 @@ typedef struct CPUTLBDesc {
size_t n_used_entries;
/* The next index to use in the tlb victim table. */
size_t vindex;
+ /* The current ASID for this tlb, if used; otherwise ignored. */
+ uint32_t asid;
/* The tlb victim table, in two parts. */
CPUTLBEntry vtable[CPU_VTLB_SIZE];
CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
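
A minimal sketch of how the new field is reached, assuming the cpu_tlb() accessor from the first hunk (the helper name is hypothetical):

/* Illustrative only: query the ASID last set for one mmu index. */
static inline uint32_t example_get_asid(CPUState *cpu, int mmu_idx)
{
    return cpu_tlb(cpu)->d[mmu_idx].asid;
}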
@@ -225,6 +225,21 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
* depend on when the guests translation ends the TB.
*/
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_set_asid_for_mmuidx:
+ * @cpu: Originating cpu
+ * @asid: Address Space Identifier
+ * @idxmap: bitmap of MMU indexes to set to @asid
+ * @depmap: bitmap of dependent MMU indexes
+ *
+ * Set an ASID for all of @idxmap. If the ASID for any index in
+ * @idxmap changes, flush that mmu idx. If a flush is required,
+ * also flush all dependent mmu indices in @depmap. The latter is
+ * typically used for secondary page resolution, when implementing
+ * virtualization within the guest.
+ */
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap, uint16_t depmap);
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
@@ -310,6 +325,10 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
uint16_t idxmap)
{
}
+static inline void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap, uint16_t depmap)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
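
As a sketch of intended use, hypothetical target code might call the new API when the guest installs a new address space id; MMU_USER_IDX, MMU_KERNEL_IDX and MMU_STAGE2_IDX below are illustrative names, not from this patch:

/* Illustrative only: set the new ASID on the indexes that translate
 * with it.  If the ASID actually changed, those indexes are flushed,
 * together with the dependent stage-2 index used for nested
 * translation. */
static void example_asid_write(CPUState *cs, uint32_t new_asid)
{
    tlb_set_asid_for_mmuidx(cs, new_asid,
                            (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX),
                            1 << MMU_STAGE2_IDX);
}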
@@ -540,6 +540,32 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap,
+ uint16_t depmap)
+{
+ CPUTLB *tlb = cpu_tlb(cpu);
+ uint16_t work, to_flush = 0;
+
+ /* It doesn't make sense to set context across cpus. */
+ assert_cpu_is_self(cpu);
+
+ /*
+ * We support ASIDs only trivially: any change of ASID
+ * requires flushing the TLB for the affected mmu indexes.
+ */
+ for (work = idxmap; work != 0; work &= work - 1) {
+ int mmu_idx = ctz32(work);
+ if (tlb->d[mmu_idx].asid != asid) {
+ tlb->d[mmu_idx].asid = asid;
+ to_flush |= 1 << mmu_idx;
+ }
+ }
+ if (to_flush) {
+ to_flush |= depmap;
+ tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(to_flush));
+ }
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
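
The loop above walks the set bits of @idxmap with the clear-lowest-set-bit step `work &= work - 1`; a standalone sketch (illustrative values, __builtin_ctz standing in for ctz32) shows the traversal:

#include <stdio.h>

int main(void)
{
    /* Bits 0, 3 and 5 set: the loop visits each mmu index once,
     * lowest first, clearing one bit per iteration. */
    unsigned work = 0x29;
    for (; work != 0; work &= work - 1) {
        int mmu_idx = __builtin_ctz(work);  /* stand-in for ctz32() */
        printf("mmu_idx %d\n", mmu_idx);    /* prints 0, 3, 5 */
    }
    return 0;
}

Note that only indexes whose ASID actually changed are accumulated into to_flush; @depmap is OR'd in afterward, so the dependent indexes are flushed exactly when a real ASID change occurred.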