@@ -433,50 +433,20 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual address in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
+ * If the guest virtual address can be translated to a host virtual
+ * address suitable for direct RAM access, without causing a guest
+ * exception, then return it.  Otherwise (the TLB entry is for an I/O
+ * access, a guest software TLB fill is required, etc.) return NULL.
*/
+#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- int access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx)
{
-#if defined(CONFIG_USER_ONLY)
return g2h(addr);
-#else
- CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
- abi_ptr tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlb_addr_write(tlbentry);
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (!tlb_hit(tlb_addr, addr)) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + tlbentry->addend;
- return (void *)haddr;
-#endif /* defined(CONFIG_USER_ONLY) */
}
+#else
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx);
+#endif
#endif /* CPU_LDST_H */
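
For context, here is a minimal sketch (not part of the patch) of how a caller
might use the new interface.  The helper do_fast_copy and its arguments are
hypothetical, the usual QEMU includes are assumed, and the range
[addr, addr + len) must not cross a guest page boundary, since
tlb_vaddr_to_host only translates a single page:

static bool do_fast_copy(CPUArchState *env, void *dest,
                         abi_ptr addr, size_t len, int mmu_idx)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);

    if (host == NULL) {
        /*
         * TLB miss that the probe fill could not resolve, or an MMIO
         * page: fall back to a slow path that may raise a guest
         * exception.
         */
        return false;
    }
    memcpy(dest, host, len);
    return true;
}

When this returns false, the caller would typically loop over the bytes with
cpu_ldub_data_ra() (or whatever slow path is appropriate), which goes through
the full softmmu load machinery and is allowed to fault.
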
@@ -1007,6 +1007,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
}
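+/*
+ * Read the TLB comparator (addr_read, addr_write or addr_code) selected
+ * by byte offset, so that one code path can handle any access type.
+ */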
+static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
+{
+#if TCG_OVERSIZED_GUEST
+ return *(target_ulong *)((uintptr_t)entry + ofs);
+#else
+ /* ofs might correspond to .addr_write, so use atomic_read */
+ return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+#endif
+}
+
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@@ -1017,14 +1027,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
- target_ulong cmp;
-
- /* elt_ofs might correspond to .addr_write, so use atomic_read */
-#if TCG_OVERSIZED_GUEST
- cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
- cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+ target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@@ -1108,6 +1111,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
}
}
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t tlb_addr, page;
+ size_t elt_ofs;
+
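+    /* Map the access type to the offset of its comparator in CPUTLBEntry. */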
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ elt_ofs = offsetof(CPUTLBEntry, addr_read);
+ break;
+ case MMU_DATA_STORE:
+ elt_ofs = offsetof(CPUTLBEntry, addr_write);
+ break;
+ case MMU_INST_FETCH:
+ elt_ofs = offsetof(CPUTLBEntry, addr_code);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ page = addr & TARGET_PAGE_MASK;
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+
+ if (!tlb_hit_page(tlb_addr, page)) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+ if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+ CPUState *cs = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
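+            /*
+             * Probe the page table without faulting: with probe set to
+             * true, tlb_fill reports failure by returning false instead
+             * of raising a guest exception.
+             */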
+ if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+ /* Non-faulting page table read failed. */
+ return NULL;
+ }
+
+ /* TLB resize via tlb_fill may have moved the entry. */
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)addr + entry->addend);
+}
+
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
@@ -4598,11 +4598,7 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
* in the real world, obviously.)
*
* Then there are the annoying special cases with watchpoints...
- *
- * TODO: Add a form of tlb_fill that does not raise an exception,
- * with a form of tlb_vaddr_to_host and a set of loads to match.
- * The non_fault_vaddr_to_host would handle everything, usually,
- * and the loads would handle the iomem path for watchpoints.
+ * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
*/
host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
split = max_for_page(addr, mem_off, mem_max);