@@ -1929,12 +1929,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
CPUIOTLBEntry *iotlbentry;
bool need_swap;
- /* For anything that is unaligned, recurse through byte_load. */
- if ((addr & (size - 1)) != 0) {
- return load_helper_unaligned(env, addr, oi, retaddr, op,
- code_read, byte_load);
- }
-
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
/* Handle watchpoints. */
@@ -2425,7 +2419,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
if (size > 1
&& unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
>= TARGET_PAGE_SIZE)) {
- do_unaligned_access:
store_helper_unaligned(env, addr, val, retaddr, size,
mmu_idx, memop_big_endian(op));
return;
@@ -2436,11 +2429,6 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
CPUIOTLBEntry *iotlbentry;
bool need_swap;
- /* For anything that is unaligned, recurse through byte stores. */
- if ((addr & (size - 1)) != 0) {
- goto do_unaligned_access;
- }
-
iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
/* Handle watchpoints. */
For unaligned I/O accesses that do not cross pages, do not handle the misalignment in cputlb, but let the memory system deal with it. RFC because this, for the first time, exposes many guests to the existing mr->ops->valid.unaligned checks in the memory subsystem. Previously this code was only reachable when guest code explicitly calls memory_region_dispatch_*. This does in fact trip up the original m68k q800 test case, #360. Since this hasn't really been reachable, I'm willing to bet that every device is wrong with respect to mr->ops->valid.unaligned, and possibly that we shouldn't even have it. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- accel/tcg/cputlb.c | 12 ------------ 1 file changed, 12 deletions(-) -- 2.25.1