Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/optimize.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -891,6 +891,24 @@ static bool fold_multiply(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
+        uint32_t a = arg_info(op->args[2])->val;
+        uint32_t b = arg_info(op->args[3])->val;
+        uint64_t r = (uint64_t)a * b;
+        TCGArg rl, rh;
+        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+
+        rl = op->args[0];
+        rh = op->args[1];
+        tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
+        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+        return true;
+    }
+    return false;
+}
+
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
     return fold_const2(ctx, op);
@@ -1697,22 +1715,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        case INDEX_op_mulu2_i32:
-            if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
-                uint32_t a = arg_info(op->args[2])->val;
-                uint32_t b = arg_info(op->args[3])->val;
-                uint64_t r = (uint64_t)a * b;
-                TCGArg rl, rh;
-                TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32);
-
-                rl = op->args[0];
-                rh = op->args[1];
-                tcg_opt_gen_movi(&ctx, op, rl, (int32_t)r);
-                tcg_opt_gen_movi(&ctx, op2, rh, (int32_t)(r >> 32));
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -1766,6 +1768,9 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(muluh):
             done = fold_multiply(&ctx, op);
             break;
+        case INDEX_op_mulu2_i32:
+            done = fold_mulu2_i32(&ctx, op);
+            break;
         CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
             break;
-- 
2.25.1
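
For readers who want to sanity-check the folding arithmetic: below is a small
standalone C program, an illustration only and not part of the patch. The
variable names a, b, r, rl, and rh mirror those in fold_mulu2_i32, but the
program itself uses no QEMU APIs. It shows how the widened 64-bit product of
the two constant inputs splits into the low and high halves that replace the
two output operands of mulu2_i32.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Stand-ins for the two constant inputs, op->args[2] and op->args[3]. */
    uint32_t a = 0xffffffffu;
    uint32_t b = 2u;

    /* Widen one operand so the multiply is performed in 64 bits. */
    uint64_t r = (uint64_t)a * b;

    /* The halves that fold_mulu2_i32 emits as movi values:
       rl replaces op->args[0], rh replaces op->args[1]. */
    uint32_t rl = (uint32_t)r;
    uint32_t rh = (uint32_t)(r >> 32);

    /* Prints: r = 0x1fffffffe, rl = 0xfffffffe, rh = 0x1 */
    printf("r = 0x%" PRIx64 ", rl = 0x%" PRIx32 ", rh = 0x%" PRIx32 "\n",
           r, rl, rh);
    return 0;
}

The (uint64_t) cast on one operand is what forces a full 64-bit multiply;
without it the product would be computed in 32 bits and the high half (rh)
would always come out zero.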