@@ -602,6 +602,7 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
int nregs = a->n + 1;
int vd = a->vd;
TCGv_i32 addr, tmp;
+ MemOp mop;
if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return false;
@@ -651,25 +652,47 @@ static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
return true;
}
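+ /* Base memory op: endianness and element size; alignment is OR'd in below. */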
+ mop = s->be_data | a->size;
+ if (a->align) {
+ static const MemOp mop_align[] = {
+ MO_ALIGN_2, MO_ALIGN_4, MO_ALIGN_8, MO_ALIGN_16
+ };
+
+ switch (nregs) {
+ case 1:
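+ /* VLD1/VST1: natural alignment for the element size. */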
+ mop |= MO_ALIGN;
+ break;
+ case 2:
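+ /* VLD2/VST2: alignment of twice the element size. */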
+ mop |= mop_align[a->size];
+ break;
+ case 3:
+ /* VLD3 allows no alignment; non-zero align was rejected as UNDEF above. */
+ break;
+ case 4:
+ /*
+  * VLD4.8/VLD4.16 require 4 * esize alignment; for VLD4.32,
+  * align = 1 is 8-byte and align = 2 is 16-byte alignment.
+  */
+ mop |= mop_align[a->size == 2 ? a->align + 1 : a->size + 1];
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
tmp = tcg_temp_new_i32();
addr = tcg_temp_new_i32();
load_reg_var(s, addr, a->rn);
- /*
- * TODO: if we implemented alignment exceptions, we should check
- * addr against the alignment encoded in a->align here.
- */
+
for (reg = 0; reg < nregs; reg++) {
if (a->l) {
- gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop);
neon_store_element(vd, a->reg_idx, a->size, tmp);
} else { /* Store */
neon_load_element(tmp, vd, a->reg_idx, a->size);
- gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
- s->be_data | a->size);
+ gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop);
}
vd += a->stride;
tcg_gen_addi_i32(addr, addr, 1 << a->size);
+
+ /* Subsequent memory operations inherit alignment */
+ mop &= ~MO_AMASK;
}
tcg_temp_free_i32(addr);
tcg_temp_free_i32(tmp);
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-neon.c.inc | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

--
2.25.1