@@ -378,12 +378,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 3))
-/* Set if TLB entry contains a watchpoint. */
-#define TLB_WATCHPOINT (1 << (TARGET_PAGE_BITS_MIN - 4))
+/* Set if TLB entry writes ignored. */
+#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull. */
#define TLB_FORCE_SLOW (1 << (TARGET_PAGE_BITS_MIN - 5))
-/* Set if TLB entry writes ignored. */
-#define TLB_DISCARD_WRITE (1 << (TARGET_PAGE_BITS_MIN - 6))
/*
* Use this mask to check interception with an alignment mask
@@ -391,7 +389,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
*/
#define TLB_FLAGS_MASK \
(TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
- | TLB_WATCHPOINT | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
+ | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
/*
* Flags stored in CPUTLBEntryFull.slow_flags[x].
@@ -399,8 +397,10 @@ CPUArchState *cpu_copy(CPUArchState *env);
*/
/* Set if TLB entry requires byte swap. */
#define TLB_BSWAP (1 << 0)
+/* Set if TLB entry contains a watchpoint. */
+#define TLB_WATCHPOINT (1 << 1)
-#define TLB_SLOW_FLAGS_MASK TLB_BSWAP
+#define TLB_SLOW_FLAGS_MASK (TLB_BSWAP | TLB_WATCHPOINT)
/* The two sets of flags must not overlap. */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
@@ -1966,7 +1966,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
*/
goto stop_the_world;
}
- /* Collect TLB_WATCHPOINT for read. */
+ /* Collect tlb flags for read. */
tlb_addr |= tlbe->addr_read;
}
} else /* if (prot & PAGE_READ) */ {
@@ -1997,12 +1997,21 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
- if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
- QEMU_BUILD_BUG_ON(PAGE_READ != BP_MEM_READ);
- QEMU_BUILD_BUG_ON(PAGE_WRITE != BP_MEM_WRITE);
- /* therefore prot == watchpoint bits */
- cpu_check_watchpoint(env_cpu(env), addr, size,
- full->attrs, prot, retaddr);
+ if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
+ int wp_flags = 0;
+
+ if ((prot & PAGE_WRITE) &&
+ (full->slow_flags[MMU_DATA_STORE] & TLB_WATCHPOINT)) {
+ wp_flags |= BP_MEM_WRITE;
+ }
+ if ((prot & PAGE_READ) &&
+ (full->slow_flags[MMU_DATA_LOAD] & TLB_WATCHPOINT)) {
+ wp_flags |= BP_MEM_READ;
+ }
+ if (wp_flags) {
+ cpu_check_watchpoint(env_cpu(env), addr, size,
+ full->attrs, wp_flags, retaddr);
+ }
}
return hostaddr;
This frees up one bit of the primary tlb flags without
impacting the TLB_NOTDIRTY logic.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 include/exec/cpu-all.h | 12 ++++++------
 accel/tcg/cputlb.c     | 23 ++++++++++++++++-------
 2 files changed, 22 insertions(+), 13 deletions(-)