Message ID | 20190726175032.6769-8-richard.henderson@linaro.org |
---|---|
State | New |
Series | target/arm: Convert aa32 base isa to decodetree |
On Fri, 26 Jul 2019 at 18:50, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Used only on the thumb side so far, but will be more obvious
> once we start unifying the implementation of A32+T32.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/translate-vfp.inc.c |  34 +------
>  target/arm/translate.c         | 163 +++++++++++++++------------------
>  2 files changed, 76 insertions(+), 121 deletions(-)
>
> diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
> index e7389bc057..4066b2febf 100644
> --- a/target/arm/translate-vfp.inc.c
> +++ b/target/arm/translate-vfp.inc.c
> @@ -941,14 +941,7 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
>          offset = -offset;
>      }
>
> -    if (s->thumb && a->rn == 15) {
> -        /* This is actually UNPREDICTABLE */
> -        addr = tcg_temp_new_i32();
> -        tcg_gen_movi_i32(addr, s->pc & ~2);
> -    } else {
> -        addr = load_reg(s, a->rn);
> -    }
> -    tcg_gen_addi_i32(addr, addr, offset);
> +    addr = add_reg_for_lit(s, a->rn, offset);
>      tmp = tcg_temp_new_i32();
>      if (a->l) {
>          gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
> @@ -983,14 +976,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
>          offset = -offset;
>      }
>
> -    if (s->thumb && a->rn == 15) {
> -        /* This is actually UNPREDICTABLE */
> -        addr = tcg_temp_new_i32();
> -        tcg_gen_movi_i32(addr, s->pc & ~2);
> -    } else {
> -        addr = load_reg(s, a->rn);
> -    }
> -    tcg_gen_addi_i32(addr, addr, offset);
> +    addr = add_reg_for_lit(s, a->rn, offset);
>      tmp = tcg_temp_new_i64();
>      if (a->l) {
>          gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
> @@ -1029,13 +1015,7 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
>          return true;
>      }
>
> -    if (s->thumb && a->rn == 15) {
> -        /* This is actually UNPREDICTABLE */
> -        addr = tcg_temp_new_i32();
> -        tcg_gen_movi_i32(addr, s->pc & ~2);
> -    } else {
> -        addr = load_reg(s, a->rn);
> -    }
> +    addr = add_reg_for_lit(s, a->rn, 0);
>      if (a->p) {
>          /* pre-decrement */
>          tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
> @@ -1112,13 +1092,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
>          return true;
>      }
>
> -    if (s->thumb && a->rn == 15) {
> -        /* This is actually UNPREDICTABLE */
> -        addr = tcg_temp_new_i32();
> -        tcg_gen_movi_i32(addr, s->pc & ~2);
> -    } else {
> -        addr = load_reg(s, a->rn);
> -    }
> +    addr = add_reg_for_lit(s, a->rn, 0);
>      if (a->p) {
>          /* pre-decrement */
>          tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
> diff --git a/target/arm/translate.c b/target/arm/translate.c
> index a48e9a90f8..5e2dd8bb16 100644
> --- a/target/arm/translate.c
> +++ b/target/arm/translate.c
> @@ -214,6 +214,23 @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
>      return tmp;
>  }
>
> +/*
> + * Create a new temp, incremented by OFS, except PC is aligned but not
> + * incremented for thumb.  This is used for load/store for which use of
> + * PC implies (literal), or ADD that implies ADR.
> + */
> +static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
> +{
> +    TCGv_i32 tmp = tcg_temp_new_i32();
> +
> +    if (reg == 15) {
> +        tcg_gen_movi_i32(tmp, (s->pc_read & ~3) + ofs);
> +    } else {
> +        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
> +    }
> +    return tmp;
> +}

This is losing the information in the comments about the UNPREDICTABLE
cases.  Are there callsites where the new function is called where
"thumb and reg == 15" is not UNPREDICTABLE, or are they all
that way?

thanks
-- PMM
On 7/29/19 7:15 AM, Peter Maydell wrote:
> On Fri, 26 Jul 2019 at 18:50, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>>
>> Used only on the thumb side so far, but will be more obvious
>> once we start unifying the implementation of A32+T32.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  target/arm/translate-vfp.inc.c |  34 +------
>>  target/arm/translate.c         | 163 +++++++++++++++------------------
>>  2 files changed, 76 insertions(+), 121 deletions(-)

[... the full patch is quoted again here; see the previous message and the diff below ...]

>
> This is losing the information in the comments about the UNPREDICTABLE
> cases.  Are there callsites where the new function is called where
> "thumb and reg == 15" is not UNPREDICTABLE, or are they all
> that way?

These call sites are that way, but this function will eventually be used for
LDR (literal) and ADR, which obviously are not UNPREDICTABLE.

I don't think this comment attached to this code is useful as-is.  Either we do
the natural a32-ish behaviour and use ALIGN(PC,4), or we should
gen_illegal_op() and be done with it.

Would you prefer a function like

/* Use of PC is UNPREDICTABLE in thumb mode, but allowed in arm mode.  */
static TCGv_i32 load_reg_nothumbpc(DisasContext *s, int reg)
{
    if (unlikely(reg == 15) && s->thumb) {
        gen_illegal_op(s);
        /* Unreachable tcg ops will be deleted but must still be legal.  */
        return tcg_const_i32(0);
    }
    return load_reg(s, reg);
}

for these specific usages?


r~
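(Aside: the ALIGN(PC, 4) rule being debated can be sketched outside of TCG as a
plain C helper.  This is an illustration only, not code from the patch; the
helper name and the plain-integer interface are invented here, and pc_read
stands for the same "PC as read by the instruction" value that s->pc_read
tracks in the series.)

#include <stdbool.h>
#include <stdint.h>

/*
 * Base address used by PC-relative load/store (literal) and ADR, given
 * the "PC as read" value (instruction address + 8 in Arm state, + 4 in
 * Thumb state).  Thumb word-aligns the base first; in Arm state the
 * value read back is already a multiple of 4, so the mask is a no-op.
 */
uint32_t literal_address(uint32_t pc_read, int32_t offset, bool thumb)
{
    uint32_t base = thumb ? (pc_read & ~3u) : pc_read;
    return base + (uint32_t)offset;
}

For example, a Thumb LDR (literal) at address 0x1002 with an immediate of 8
reads PC as 0x1006, aligns it down to 0x1004, and loads from 0x100c.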
On Tue, 30 Jul 2019 at 01:51, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> On 7/29/19 7:15 AM, Peter Maydell wrote:
> > On Fri, 26 Jul 2019 at 18:50, Richard Henderson
> > <richard.henderson@linaro.org> wrote:
> >>
> >> Used only on the thumb side so far, but will be more obvious
> >> once we start unifying the implementation of A32+T32.
> >>
> >> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> >> ---
> > This is losing the information in the comments about the UNPREDICTABLE
> > cases.  Are there callsites where the new function is called where
> > "thumb and reg == 15" is not UNPREDICTABLE, or are they all
> > that way?
>
> These call sites are that way, but this function will eventually be used for
> LDR (literal) and ADR, which obviously are not UNPREDICTABLE.
>
> I don't think this comment attached to this code is useful as-is.  Either we do
> the natural a32-ish behaviour and use ALIGN(PC,4), or we should
> gen_illegal_op() and be done with it.

I think it's usually worth noting when something's UNPREDICTABLE
and we're choosing to take the falls-out-of-the-code behaviour,
that's all.

> Would you prefer a function like
>
> /* Use of PC is UNPREDICTABLE in thumb mode, but allowed in arm mode.  */
> static TCGv_i32 load_reg_nothumbpc(DisasContext *s, int reg)
> {
>     if (unlikely(reg == 15) && s->thumb) {
>         gen_illegal_op(s);
>         /* Unreachable tcg ops will be deleted but must still be legal.  */
>         return tcg_const_i32(0);
>     }
>     return load_reg(s, reg);
> }
>
> for these specific usages?

I definitely don't favour this -- all our "is this going to UNDEF"
checks should go right at the start before we generate any TCG code
at all for the insn.  One of the things I'm hoping this series cleans
up is that the current decoder is quite bad at sometimes detecting
UNDEF conditions late (which then results in warnings about potential
leaks of TCG variables).

thanks
-- PMM
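(Aside: a minimal sketch of the shape Peter is describing, with every
UNDEF/UNPREDICTABLE decision made before the first tcg_gen_* call.
trans_VFOO, arg_VFOO and its rd/rn/imm fields are placeholder names rather
than anything in this series; returning false is assumed to be turned into
UNDEF by the decodetree caller, as with the existing trans_VLDR_VSTR_*
handlers.)

static bool trans_VFOO(DisasContext *s, arg_VFOO *a)
{
    TCGv_i32 addr, tmp;

    if (s->thumb && a->rn == 15) {
        /* UNPREDICTABLE: choose to UNDEF, decided before any codegen,
           so there are no TCG temporaries to leak. */
        return false;
    }

    addr = add_reg_for_lit(s, a->rn, a->imm << 2);
    tmp = tcg_temp_new_i32();
    gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
    tcg_temp_free_i32(addr);
    store_reg(s, a->rd, tmp);   /* store_reg consumes tmp */
    return true;
}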
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index e7389bc057..4066b2febf 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -941,14 +941,7 @@ static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
         offset = -offset;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
-    tcg_gen_addi_i32(addr, addr, offset);
+    addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i32();
     if (a->l) {
         gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
@@ -983,14 +976,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
         offset = -offset;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
-    tcg_gen_addi_i32(addr, addr, offset);
+    addr = add_reg_for_lit(s, a->rn, offset);
     tmp = tcg_temp_new_i64();
     if (a->l) {
         gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
@@ -1029,13 +1015,7 @@ static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
         return true;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
+    addr = add_reg_for_lit(s, a->rn, 0);
     if (a->p) {
         /* pre-decrement */
         tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
@@ -1112,13 +1092,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
         return true;
     }
 
-    if (s->thumb && a->rn == 15) {
-        /* This is actually UNPREDICTABLE */
-        addr = tcg_temp_new_i32();
-        tcg_gen_movi_i32(addr, s->pc & ~2);
-    } else {
-        addr = load_reg(s, a->rn);
-    }
+    addr = add_reg_for_lit(s, a->rn, 0);
     if (a->p) {
         /* pre-decrement */
         tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
diff --git a/target/arm/translate.c b/target/arm/translate.c
index a48e9a90f8..5e2dd8bb16 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -214,6 +214,23 @@ static inline TCGv_i32 load_reg(DisasContext *s, int reg)
     return tmp;
 }
 
+/*
+ * Create a new temp, incremented by OFS, except PC is aligned but not
+ * incremented for thumb.  This is used for load/store for which use of
+ * PC implies (literal), or ADD that implies ADR.
+ */
+static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
+{
+    TCGv_i32 tmp = tcg_temp_new_i32();
+
+    if (reg == 15) {
+        tcg_gen_movi_i32(tmp, (s->pc_read & ~3) + ofs);
+    } else {
+        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
+    }
+    return tmp;
+}
+
 /* Set a CPU register.  The source must be a temporary and will be
    marked as dead. */
 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
@@ -9468,16 +9485,12 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
              */
             bool wback = extract32(insn, 21, 1);
 
-            if (rn == 15) {
-                if (insn & (1 << 21)) {
-                    /* UNPREDICTABLE */
-                    goto illegal_op;
-                }
-                addr = tcg_temp_new_i32();
-                tcg_gen_movi_i32(addr, s->pc_read & ~3);
-            } else {
-                addr = load_reg(s, rn);
+            if (rn == 15 && (insn & (1 << 21))) {
+                /* UNPREDICTABLE */
+                goto illegal_op;
             }
+
+            addr = add_reg_for_lit(s, rn, 0);
             offset = (insn & 0xff) * 4;
             if ((insn & (1 << 23)) == 0) {
                 offset = -offset;
@@ -10683,27 +10696,15 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
                     store_reg(s, rd, tmp);
                 } else {
                     /* Add/sub 12-bit immediate.  */
-                    if (rn == 15) {
-                        offset = s->pc_read & ~(uint32_t)3;
-                        if (insn & (1 << 23))
-                            offset -= imm;
-                        else
-                            offset += imm;
-                        tmp = tcg_temp_new_i32();
-                        tcg_gen_movi_i32(tmp, offset);
-                        store_reg(s, rd, tmp);
+                    if (insn & (1 << 23)) {
+                        imm = -imm;
+                    }
+                    tmp = add_reg_for_lit(s, rn, imm);
+                    if (rn == 13 && rd == 13) {
+                        /* ADD SP, SP, imm or SUB SP, SP, imm */
+                        store_sp_checked(s, tmp);
                     } else {
-                        tmp = load_reg(s, rn);
-                        if (insn & (1 << 23))
-                            tcg_gen_subi_i32(tmp, tmp, imm);
-                        else
-                            tcg_gen_addi_i32(tmp, tmp, imm);
-                        if (rn == 13 && rd == 13) {
-                            /* ADD SP, SP, imm or SUB SP, SP, imm */
-                            store_sp_checked(s, tmp);
-                        } else {
-                            store_reg(s, rd, tmp);
-                        }
+                        store_reg(s, rd, tmp);
                     }
                 }
             }
@@ -10817,60 +10818,51 @@ static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
            }
         }
         memidx = get_mem_index(s);
-        if (rn == 15) {
-            addr = tcg_temp_new_i32();
-            /* PC relative.  */
-            imm = s->pc_read & 0xfffffffc;
-            if (insn & (1 << 23))
-                imm += insn & 0xfff;
-            else
-                imm -= insn & 0xfff;
-            tcg_gen_movi_i32(addr, imm);
+        imm = insn & 0xfff;
+        if (insn & (1 << 23)) {
+            /* PC relative or Positive offset.  */
+            addr = add_reg_for_lit(s, rn, imm);
+        } else if (rn == 15) {
+            /* PC relative with negative offset.  */
+            addr = add_reg_for_lit(s, rn, -imm);
         } else {
             addr = load_reg(s, rn);
-            if (insn & (1 << 23)) {
-                /* Positive offset.  */
-                imm = insn & 0xfff;
-                tcg_gen_addi_i32(addr, addr, imm);
-            } else {
-                imm = insn & 0xff;
-                switch ((insn >> 8) & 0xf) {
-                case 0x0: /* Shifted Register.  */
-                    shift = (insn >> 4) & 0xf;
-                    if (shift > 3) {
-                        tcg_temp_free_i32(addr);
-                        goto illegal_op;
-                    }
-                    tmp = load_reg(s, rm);
-                    if (shift)
-                        tcg_gen_shli_i32(tmp, tmp, shift);
-                    tcg_gen_add_i32(addr, addr, tmp);
-                    tcg_temp_free_i32(tmp);
-                    break;
-                case 0xc: /* Negative offset.  */
-                    tcg_gen_addi_i32(addr, addr, -imm);
-                    break;
-                case 0xe: /* User privilege.  */
-                    tcg_gen_addi_i32(addr, addr, imm);
-                    memidx = get_a32_user_mem_index(s);
-                    break;
-                case 0x9: /* Post-decrement.  */
-                    imm = -imm;
-                    /* Fall through.  */
-                case 0xb: /* Post-increment.  */
-                    postinc = 1;
-                    writeback = 1;
-                    break;
-                case 0xd: /* Pre-decrement.  */
-                    imm = -imm;
-                    /* Fall through.  */
-                case 0xf: /* Pre-increment.  */
-                    writeback = 1;
-                    break;
-                default:
+            imm = insn & 0xff;
+            switch ((insn >> 8) & 0xf) {
+            case 0x0: /* Shifted Register.  */
+                shift = (insn >> 4) & 0xf;
+                if (shift > 3) {
                     tcg_temp_free_i32(addr);
                     goto illegal_op;
                 }
+                tmp = load_reg(s, rm);
+                tcg_gen_shli_i32(tmp, tmp, shift);
+                tcg_gen_add_i32(addr, addr, tmp);
+                tcg_temp_free_i32(tmp);
+                break;
+            case 0xc: /* Negative offset.  */
+                tcg_gen_addi_i32(addr, addr, -imm);
+                break;
+            case 0xe: /* User privilege.  */
+                tcg_gen_addi_i32(addr, addr, imm);
+                memidx = get_a32_user_mem_index(s);
+                break;
+            case 0x9: /* Post-decrement.  */
+                imm = -imm;
+                /* Fall through.  */
+            case 0xb: /* Post-increment.  */
+                postinc = 1;
+                writeback = 1;
+                break;
+            case 0xd: /* Pre-decrement.  */
+                imm = -imm;
+                /* Fall through.  */
+            case 0xf: /* Pre-increment.  */
+                writeback = 1;
+                break;
+            default:
+                tcg_temp_free_i32(addr);
+                goto illegal_op;
             }
         }
 
@@ -11066,10 +11058,7 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
         if (insn & (1 << 11)) {
             rd = (insn >> 8) & 7;
             /* load pc-relative.  Bit 1 of PC is ignored.  */
-            val = s->pc_read + ((insn & 0xff) * 4);
-            val &= ~(uint32_t)2;
-            addr = tcg_temp_new_i32();
-            tcg_gen_movi_i32(addr, val);
+            addr = add_reg_for_lit(s, 15, (insn & 0xff) * 4);
             tmp = tcg_temp_new_i32();
             gen_aa32_ld32u_iss(s, tmp, addr, get_mem_index(s),
                                rd | ISSIs16Bit);
@@ -11447,16 +11436,8 @@ static void disas_thumb_insn(DisasContext *s, uint32_t insn)
          * - Add PC/SP (immediate)
          */
        rd = (insn >> 8) & 7;
-        if (insn & (1 << 11)) {
-            /* SP */
-            tmp = load_reg(s, 13);
-        } else {
-            /* PC. bit 1 is ignored.  */
-            tmp = tcg_temp_new_i32();
-            tcg_gen_movi_i32(tmp, s->pc_read & ~(uint32_t)2);
-        }
         val = (insn & 0xff) * 4;
-        tcg_gen_addi_i32(tmp, tmp, val);
+        tmp = add_reg_for_lit(s, insn & (1 << 11) ? 13 : 15, val);
         store_reg(s, rd, tmp);
         break;
Used only on the thumb side so far, but will be more obvious
once we start unifying the implementation of A32+T32.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/translate-vfp.inc.c |  34 +------
 target/arm/translate.c         | 163 +++++++++++++++------------------
 2 files changed, 76 insertions(+), 121 deletions(-)

--
2.17.1