@@ -240,6 +240,22 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
*/
void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
uint16_t idxmap, uint16_t depmap);
+/**
+ * tlb_flush_asid_by_mmuidx:
+ * @cpu: Originating CPU of the flush
+ * @asid: Address Space Identifier
+ * @idxmap: bitmap of MMU indexes to flush if @asid matches
+ *
+ * For each MMU index in @idxmap, flush the index if @asid matches
+ * the value previously saved via tlb_set_asid_for_mmuidx.
+ */
+void tlb_flush_asid_by_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap);
+/* Similarly, broadcasting to all cpus. */
+void tlb_flush_asid_by_mmuidx_all_cpus(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap);
+/* Similarly, waiting for the broadcast to complete. */
+void tlb_flush_asid_by_mmuidx_all_cpus_synced(CPUState *cpu, uint32_t asid,
+ uint16_t idxmap);
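For context, a hypothetical target-side caller might pair the new flush entry points with tlb_set_asid_for_mmuidx along these lines. This is a sketch only: MMU_USER_IDX, MMU_KERNEL_IDX and both helper names are made-up illustrations, not part of this patch.

static void set_current_asid(CPUState *cs, uint32_t new_asid)
{
    /* Record the ASID now resident in these mmu indexes; any index
     * whose saved ASID differs is flushed by tlb_set_asid_for_mmuidx. */
    tlb_set_asid_for_mmuidx(cs, new_asid,
                            (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX), 0);
}

static void invalidate_by_asid(CPUState *cs, uint32_t asid)
{
    /* Flush only those indexes still holding translations for @asid,
     * on all cpus, waiting for the flushes to complete. */
    tlb_flush_asid_by_mmuidx_all_cpus_synced(cs, asid,
                                             (1 << MMU_USER_IDX) |
                                             (1 << MMU_KERNEL_IDX));
}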
/**
* tlb_set_page_with_attrs:
* @cpu: CPU to add this TLB entry for
@@ -285,12 +285,14 @@ typedef union {
unsigned long host_ulong;
void *host_ptr;
vaddr target_ptr;
+ uint64_t uint64;
} run_on_cpu_data;

#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
+#define RUN_ON_CPU_UINT64(i) ((run_on_cpu_data){.uint64 = (i)})
#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)

typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
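The new uint64 member exists so a single run-on-cpu payload can carry both a 32-bit ASID and an mmu-index bitmap regardless of the host's pointer width. A round-trip sketch, assuming QEMU's deposit64 from qemu/bitops.h; the pack/unpack helper names are illustrative only, not part of this patch:

#include "qemu/bitops.h"

/* Illustrative only: ASID in the low 32 bits, idxmap deposited at bit 32. */
static inline run_on_cpu_data pack_asid_idxmap(uint32_t asid, uint16_t idxmap)
{
    return RUN_ON_CPU_UINT64(deposit64(asid, 32, 32, idxmap));
}

static inline void unpack_asid_idxmap(run_on_cpu_data data,
                                      uint32_t *asid, uint16_t *idxmap)
{
    *asid = data.uint64;          /* truncation keeps the low 32 bits */
    *idxmap = data.uint64 >> 32;  /* the bitmap was deposited at bit 32 */
}

This is the same packing that tlb_flush_asid_by_mmuidx and its async worker use below.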
@@ -540,6 +540,61 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

+static void tlb_flush_asid_by_mmuidx_async_work(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ CPUTLB *tlb = cpu_tlb(cpu);
+    uint32_t asid = data.uint64;          /* low 32 bits: the ASID */
+    uint16_t idxmap = data.uint64 >> 32;  /* bit 32 up: mmu index bitmap */
+ uint16_t to_flush = 0, work;
+
+ assert_cpu_is_self(cpu);
+
+ for (work = idxmap; work != 0; work &= work - 1) {
+ int mmu_idx = ctz32(work);
+ if (tlb->d[mmu_idx].asid == asid) {
+ to_flush |= 1 << mmu_idx;
+ }
+ }
+
+ if (to_flush) {
+ tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(to_flush));
+ }
+}
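The loop above visits each set bit of @idxmap exactly once: work &= work - 1 clears the lowest set bit per iteration, and ctz32 yields its position. The idiom in isolation, as a self-contained demo in plain C, using the GCC/Clang builtin in place of QEMU's ctz32:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t idxmap = 0x0029;  /* mmu indexes 0, 3, 5 */
    uint32_t work;

    for (work = idxmap; work != 0; work &= work - 1) {
        /* work is nonzero here, so the builtin is well defined */
        printf("mmu_idx %d\n", __builtin_ctz(work));
    }
    return 0;  /* prints 0, 3, 5 */
}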
+
+void tlb_flush_asid_by_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap)
+{
+ uint64_t asid_idx = deposit64(asid, 32, 32, idxmap);
+
+ if (cpu->created && !qemu_cpu_is_self(cpu)) {
+ async_run_on_cpu(cpu, tlb_flush_asid_by_mmuidx_async_work,
+ RUN_ON_CPU_UINT64(asid_idx));
+ } else {
+ tlb_flush_asid_by_mmuidx_async_work(cpu, RUN_ON_CPU_UINT64(asid_idx));
+ }
+}
+
+void tlb_flush_asid_by_mmuidx_all_cpus(CPUState *src_cpu,
+ uint32_t asid, uint16_t idxmap)
+{
+ uint64_t asid_idx = deposit64(asid, 32, 32, idxmap);
+
+ flush_all_helper(src_cpu, tlb_flush_asid_by_mmuidx_async_work,
+ RUN_ON_CPU_UINT64(asid_idx));
+ tlb_flush_asid_by_mmuidx_async_work(src_cpu, RUN_ON_CPU_UINT64(asid_idx));
+}
+
+void tlb_flush_asid_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ uint32_t asid, uint16_t idxmap)
+{
+ uint64_t asid_idx = deposit64(asid, 32, 32, idxmap);
+
+ flush_all_helper(src_cpu, tlb_flush_asid_by_mmuidx_async_work,
+ RUN_ON_CPU_UINT64(asid_idx));
+ async_safe_run_on_cpu(src_cpu, tlb_flush_asid_by_mmuidx_async_work,
+ RUN_ON_CPU_UINT64(asid_idx));
+}
+
void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap,
uint16_t depmap)
{