@@ -373,28 +373,6 @@ static void per_branch_disp(DisasContext *s, int64_t disp)
#endif
}
-static void per_branch_cond(DisasContext *s, TCGCond cond,
- TCGv_i64 arg1, TCGv_i64 arg2)
-{
-#ifndef CONFIG_USER_ONLY
- if (per_enabled(s)) {
- TCGLabel *lab = gen_new_label();
- tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
-
- gen_psw_addr_disp(s, gbea, 0);
- gen_helper_per_branch(cpu_env, gbea, psw_addr);
-
- gen_set_label(lab);
- } else {
- TCGv_i64 pc = tcg_temp_new_i64();
-
- gen_psw_addr_disp(s, pc, 0);
- tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
- tcg_temp_free_i64(pc);
- }
-#endif
-}
-
static void per_breaking_event(DisasContext *s)
{
#ifndef CONFIG_USER_ONLY
@@ -1205,7 +1183,6 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
{
DisasJumpType ret;
int64_t disp = (int64_t)imm * 2;
- uint64_t dest = s->base.pc_next + disp;
TCGLabel *lab;
/* Take care of the special cases first. */
@@ -1235,96 +1212,53 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
}
}
- if (use_goto_tb(s, s->base.pc_next + s->ilen)) {
- if (is_imm && use_goto_tb(s, dest)) {
- /* Both exits can use goto_tb. */
- update_cc_op(s);
+ update_cc_op(s);
- lab = gen_new_label();
- if (c->is_64) {
- tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
- } else {
- tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
- }
-
- /* Branch not taken. */
- tcg_gen_goto_tb(0);
- gen_psw_addr_disp(s, psw_addr, s->ilen);
- tcg_gen_exit_tb(s->base.tb, 0);
-
- /* Branch taken. */
- gen_set_label(lab);
- per_breaking_event(s);
- tcg_gen_goto_tb(1);
- gen_psw_addr_disp(s, psw_addr, disp);
- tcg_gen_exit_tb(s->base.tb, 1);
-
- ret = DISAS_NORETURN;
- } else {
- /* Fallthru can use goto_tb, but taken branch cannot. */
- /* Store taken branch destination before the brcond. This
- avoids having to allocate a new local temp to hold it.
- We'll overwrite this in the not taken case anyway. */
- if (!is_imm) {
- tcg_gen_mov_i64(psw_addr, cdest);
- }
-
- lab = gen_new_label();
- if (c->is_64) {
- tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
- } else {
- tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
- }
-
- /* Branch not taken. */
- update_cc_op(s);
- tcg_gen_goto_tb(0);
- gen_psw_addr_disp(s, psw_addr, s->ilen);
- tcg_gen_exit_tb(s->base.tb, 0);
-
- gen_set_label(lab);
- if (is_imm) {
- gen_psw_addr_disp(s, psw_addr, disp);
- }
- per_breaking_event(s);
- ret = DISAS_PC_UPDATED;
- }
- } else {
- /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
- Most commonly we're single-stepping or some other condition that
- disables all use of goto_tb. Just update the PC and exit. */
-
- TCGv_i64 next = tcg_temp_new_i64();
-
- gen_psw_addr_disp(s, next, s->ilen);
- if (is_imm) {
- cdest = tcg_temp_new_i64();
- gen_psw_addr_disp(s, cdest, disp);
- }
-
- if (c->is_64) {
- tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
- cdest, next);
- per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
- } else {
- TCGv_i32 t0 = tcg_temp_new_i32();
- TCGv_i64 t1 = tcg_temp_new_i64();
- TCGv_i64 z = tcg_constant_i64(0);
- tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
- tcg_gen_extu_i32_i64(t1, t0);
- tcg_temp_free_i32(t0);
- tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
- per_branch_cond(s, TCG_COND_NE, t1, z);
- tcg_temp_free_i64(t1);
- }
-
- tcg_temp_free_i64(next);
- if (is_imm) {
- tcg_temp_free_i64(cdest);
- }
- ret = DISAS_PC_UPDATED;
+ /*
+ * Store taken branch destination before the brcond. This
+ * avoids having to allocate a new local temp to hold it.
+ * We'll overwrite this in the not taken case anyway.
+ */
+ if (!is_imm) {
+ tcg_gen_mov_i64(psw_addr, cdest);
}
+ lab = gen_new_label();
+ if (c->is_64) {
+ tcg_gen_brcond_i64(tcg_invert_cond(c->cond),
+ c->u.s64.a, c->u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(tcg_invert_cond(c->cond),
+ c->u.s32.a, c->u.s32.b, lab);
+ }
+
+ /* Branch taken. */
+ if (is_imm) {
+ gen_psw_addr_disp(s, psw_addr, disp);
+ }
+ per_branch_dest(s, psw_addr);
+
+ if (is_imm && use_goto_tb(s, s->base.pc_next + disp)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_exit_tb(s->base.tb, 0);
+ } else {
+ tcg_gen_lookup_and_goto_ptr();
+ }
+
+ gen_set_label(lab);
+
+ /* Branch not taken. */
+ if (use_goto_tb(s, s->base.pc_next + s->ilen)) {
+ tcg_gen_goto_tb(1);
+ gen_psw_addr_disp(s, psw_addr, s->ilen);
+ tcg_gen_exit_tb(s->base.tb, 1);
+ } else {
+ gen_psw_addr_disp(s, psw_addr, s->ilen);
+ tcg_gen_lookup_and_goto_ptr();
+ }
+
+ ret = DISAS_NORETURN;
+
egress:
free_compare(c);
return ret;
Always use a tcg branch instead of movcond. The movcond was not a bad idea before PER was added, but since then we have either 2 or 3 actions to perform on each leg of the branch, and multiple movconds are inefficient. Reorder the taken branch to be the fallthrough of the tcg branch. This will be helpful later with TARGET_TB_PCREL. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- target/s390x/tcg/translate.c | 154 ++++++++++------------------------- 1 file changed, 44 insertions(+), 110 deletions(-)