@@ -915,6 +915,31 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+static bool fold_dup(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t = arg_info(op->args[1])->val;
+        t = dup_const(TCGOP_VECE(op), t);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_dup2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
+                               arg_info(op->args[2])->val);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+
+    if (args_are_copies(op->args[1], op->args[2])) {
+        op->opc = INDEX_op_dup_vec;
+        TCGOP_VECE(op) = MO_32;
+    }
+    return false;
+}
+
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -1716,28 +1741,6 @@ void tcg_optimize(TCGContext *s)
             done = tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
             break;
 
-        case INDEX_op_dup_vec:
-            if (arg_is_const(op->args[1])) {
-                tmp = arg_info(op->args[1])->val;
-                tmp = dup_const(TCGOP_VECE(op), tmp);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
-        case INDEX_op_dup2_vec:
-            assert(TCG_TARGET_REG_BITS == 32);
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0],
-                                 deposit64(arg_info(op->args[1])->val, 32, 32,
-                                           arg_info(op->args[2])->val));
-                continue;
-            } else if (args_are_copies(op->args[1], op->args[2])) {
-                op->opc = INDEX_op_dup_vec;
-                TCGOP_VECE(op) = MO_32;
-            }
-            break;
-
         default:
             break;
         }
@@ -1781,6 +1784,12 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(divu):
             done = fold_const2(&ctx, op);
             break;
+        case INDEX_op_dup_vec:
+            done = fold_dup(&ctx, op);
+            break;
+        case INDEX_op_dup2_vec:
+            done = fold_dup2(&ctx, op);
+            break;
         CASE_OP_32_64(eqv):
             done = fold_eqv(&ctx, op);
             break;
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 53 +++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 22 deletions(-)

-- 
2.25.1
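
For reference, below is a minimal standalone sketch (not part of the patch) of the constant arithmetic that the new fold_dup and fold_dup2 helpers perform. The functions dup_const_sketch and deposit64_sketch are illustrative stand-ins written here; QEMU's real dup_const() and deposit64() helpers are assumed to behave equivalently.

/*
 * Illustrative sketch of the folding done by fold_dup and fold_dup2.
 * dup_const_sketch and deposit64_sketch are local stand-ins, not QEMU code.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Replicate a (1 << vece)-byte element across a 64-bit constant. */
static uint64_t dup_const_sketch(unsigned vece, uint64_t c)
{
    switch (vece) {
    case 0:  return 0x0101010101010101ull * (uint8_t)c;   /* MO_8  */
    case 1:  return 0x0001000100010001ull * (uint16_t)c;  /* MO_16 */
    case 2:  return 0x0000000100000001ull * (uint32_t)c;  /* MO_32 */
    default: return c;                                     /* MO_64 */
    }
}

/* Insert @val into @dst as a @len-bit field starting at bit @pos. */
static uint64_t deposit64_sketch(uint64_t dst, int pos, int len, uint64_t val)
{
    uint64_t mask = (~0ull >> (64 - len)) << pos;
    return (dst & ~mask) | ((val << pos) & mask);
}

int main(void)
{
    /* fold_dup: a dup_vec of a constant element folds to a single movi. */
    printf("dup MO_8  0xab   -> 0x%016" PRIx64 "\n", dup_const_sketch(0, 0xab));
    printf("dup MO_32 0x1234 -> 0x%016" PRIx64 "\n", dup_const_sketch(2, 0x1234));

    /* fold_dup2: on 32-bit hosts, constant lo/hi halves fold to one movi. */
    printf("dup2 lo=0x11111111 hi=0x22222222 -> 0x%016" PRIx64 "\n",
           deposit64_sketch(0x11111111, 32, 32, 0x22222222));
    return 0;
}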