--- a/configs/targets/alpha-linux-user.mak
+++ b/configs/targets/alpha-linux-user.mak
@@ -1,4 +1,3 @@
TARGET_ARCH=alpha
TARGET_SYSTBL_ABI=common
TARGET_SYSTBL=syscall.tbl
-TARGET_ALIGNED_ONLY=y
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -293,14 +293,14 @@ static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
- tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
+ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, t1);
tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
- tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ | MO_ALIGN);
tcg_gen_mov_i64(cpu_lock_addr, t1);
tcg_gen_mov_i64(cpu_lock_value, t0);
}
@@ -2840,12 +2840,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LESL);
+ ctx->mem_idx, MO_LESL | MO_ALIGN);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEQ);
+ ctx->mem_idx, MO_LEQ | MO_ALIGN);
break;
case 0x30:
/* BR */
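
Not part of the patch, but as an illustration of what the two translate.c hunks change for guest code: a minimal sketch (GCC-style Alpha inline asm; the buffer and helper names are invented for the example) of an LL/SC sequence on a misaligned pointer. With MO_ALIGN on LDx_L/STx_C, qemu-alpha should now raise SIGBUS for the misaligned call instead of silently performing the access, matching the real kernel, whose unaligned-access fixup does not handle load-locked/store-conditional.

/* Illustration only -- not part of this patch.  Alpha-only; build with
 * an alpha cross compiler and run under qemu-alpha. */
#include <stdio.h>

static char buf[16] __attribute__((aligned(8)));

/* Atomically increment *p with an LDL_L/STL_C retry loop. */
static int ll_sc_increment(int *p)
{
    int old, tmp;
    __asm__ __volatile__(
        "1: ldl_l  %0,%2\n"      /* load-locked: old = *p            */
        "   addl   %0,1,%1\n"    /* tmp = old + 1                    */
        "   stl_c  %1,%2\n"      /* store-conditional; %1 = success  */
        "   beq    %1,1b\n"      /* lost the reservation, retry      */
        : "=&r"(old), "=&r"(tmp), "+m"(*p));
    return old;
}

int main(void)
{
    int *aligned    = (int *)buf;       /* 4-byte aligned: still works          */
    int *misaligned = (int *)(buf + 1); /* expected to raise SIGBUS from now on */

    printf("aligned: old value %d\n", ll_sc_increment(aligned));
    printf("misaligned: old value %d\n", ll_sc_increment(misaligned));
    return 0;
}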
By default, the Linux kernel fixes up unaligned accesses.  Therefore, as
the kernel surrogate, qemu should as well.  No fixups are done for
load-locked/store-conditional, so mark those as MO_ALIGN.

There is a syscall to disable this fixup and (among other things)
deliver SIGBUS instead, but it is essentially unused.  A survey of open
source code shows no uses of SSI_NVPAIRS except trivial examples that
show how to disable unaligned fixups.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 configs/targets/alpha-linux-user.mak | 1 -
 target/alpha/translate.c             | 8 ++++----
 2 files changed, 4 insertions(+), 5 deletions(-)

-- 
2.25.1
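
For reference, the syscall mentioned in the commit message is osf_setsysinfo with the SSI_NVPAIRS operation; the "trivial examples" in the wild all reduce to something like the sketch below (assuming the usual Alpha Linux uapi headers; alpha-only, and not part of this patch).

/* Ask the kernel to stop fixing up this process's unaligned accesses
 * and deliver SIGBUS instead.  Illustration only. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/sysinfo.h>    /* SSI_NVPAIRS, SSIN_UACPROC, UAC_* */
#include <asm/unistd.h>     /* __NR_osf_setsysinfo              */

static int disable_unaligned_fixups(void)
{
    unsigned int pair[2] = {
        SSIN_UACPROC,               /* name: per-process unaligned control */
        UAC_SIGBUS | UAC_NOPRINT,   /* value: SIGBUS, no console messages  */
    };
    /* One name/value pair. */
    return syscall(__NR_osf_setsysinfo, SSI_NVPAIRS, pair, 1UL, 0, 0);
}

int main(void)
{
    if (disable_unaligned_fixups() < 0) {
        perror("osf_setsysinfo(SSI_NVPAIRS)");
        return 1;
    }
    puts("unaligned accesses will now raise SIGBUS");
    return 0;
}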