@@ -660,6 +660,60 @@ static void finish_folding(OptContext *ctx, TCGOp *op)
    }
}
+/*
+ * The fold_* functions return true when processing is complete,
+ * usually by folding the operation to a constant or to a copy,
+ * and calling tcg_opt_gen_{mov,movi}. They may do other things,
+ * like collect information about the value produced, for use in
+ * optimizing a subsequent operation.
+ *
+ * These first fold_* functions are all helpers, used by other
+ * folders for more specific operations.
+ */
+
+static bool fold_const1(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1])) {
+        uint64_t t;
+
+        t = arg_info(op->args[1])->val;
+        t = do_constant_folding(op->opc, t, 0);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
+    }
+    return false;
+}
+
+static bool fold_const2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t t1 = arg_info(op->args[1])->val;
+        uint64_t t2 = arg_info(op->args[2])->val;
+
+        t1 = do_constant_folding(op->opc, t1, t2);
+        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
+    }
+    return false;
+}
+
+/*
+ * These outermost fold_<op> functions are sorted alphabetically.
+ */
+
+static bool fold_add(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_and(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_andc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
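[Note: the block comment above describes the fold_* contract — return true once the op has been fully handled (typically by folding it to a constant or copy via tcg_opt_gen_{mov,movi}), false to let generic processing continue. The standalone sketch below is not part of the patch; the names Op, do_fold and fold_const2_demo are invented for illustration, so that the same contract can be compiled and run outside of QEMU.]

/* Standalone illustration of the fold_const2 pattern; all types and
 * names here are invented for the example and do not exist in QEMU. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { OPC_ADD, OPC_SUB };

typedef struct {
    int opc;
    bool is_const[3];   /* arg 0 is the output, args 1..2 the inputs */
    uint64_t val[3];
} Op;

static uint64_t do_fold(int opc, uint64_t x, uint64_t y)
{
    switch (opc) {
    case OPC_ADD: return x + y;
    case OPC_SUB: return x - y;
    default:      return 0;
    }
}

/* Mirrors fold_const2: fold only when both inputs are known constants,
 * and report via the return value whether the op was fully handled. */
static bool fold_const2_demo(Op *op)
{
    if (op->is_const[1] && op->is_const[2]) {
        op->val[0] = do_fold(op->opc, op->val[1], op->val[2]);
        op->is_const[0] = true;
        return true;    /* processing complete */
    }
    return false;       /* leave the op for the generic path */
}

int main(void)
{
    Op add = { OPC_ADD, { false, true, true }, { 0, 2, 3 } };

    if (fold_const2_demo(&add)) {
        printf("folded to %" PRIu64 "\n", add.val[0]);
    }
    return 0;
}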
@@ -692,6 +746,26 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
    return true;
}
+static bool fold_ctpop(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_eqv(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_exts(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_extu(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
@@ -716,6 +790,41 @@ static bool fold_mb(OptContext *ctx, TCGOp *op)
    return true;
}
+static bool fold_multiply(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_nand(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_neg(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_nor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_not(OptContext *ctx, TCGOp *op)
+{
+    return fold_const1(ctx, op);
+}
+
+static bool fold_or(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_orc(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
@@ -730,6 +839,21 @@ static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
    return false;
}
+static bool fold_shift(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
+static bool fold_xor(OptContext *ctx, TCGOp *op)
+{
+    return fold_const2(ctx, op);
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -1276,26 +1400,6 @@ void tcg_optimize(TCGContext *s)
            }
            break;
-        CASE_OP_32_64(not):
-        CASE_OP_32_64(neg):
-        CASE_OP_32_64(ext8s):
-        CASE_OP_32_64(ext8u):
-        CASE_OP_32_64(ext16s):
-        CASE_OP_32_64(ext16u):
-        CASE_OP_32_64(ctpop):
-        case INDEX_op_ext32s_i64:
-        case INDEX_op_ext32u_i64:
-        case INDEX_op_ext_i32_i64:
-        case INDEX_op_extu_i32_i64:
-        case INDEX_op_extrl_i64_i32:
-        case INDEX_op_extrh_i64_i32:
-            if (arg_is_const(op->args[1])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
@@ -1307,36 +1411,6 @@ void tcg_optimize(TCGContext *s)
            }
            break;
-        CASE_OP_32_64(add):
-        CASE_OP_32_64(sub):
-        CASE_OP_32_64(mul):
-        CASE_OP_32_64(or):
-        CASE_OP_32_64(and):
-        CASE_OP_32_64(xor):
-        CASE_OP_32_64(shl):
-        CASE_OP_32_64(shr):
-        CASE_OP_32_64(sar):
-        CASE_OP_32_64(rotl):
-        CASE_OP_32_64(rotr):
-        CASE_OP_32_64(andc):
-        CASE_OP_32_64(orc):
-        CASE_OP_32_64(eqv):
-        CASE_OP_32_64(nand):
-        CASE_OP_32_64(nor):
-        CASE_OP_32_64(muluh):
-        CASE_OP_32_64(mulsh):
-        CASE_OP_32_64(div):
-        CASE_OP_32_64(divu):
-        CASE_OP_32_64(rem):
-        CASE_OP_32_64(remu):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                tmp = do_constant_folding(opc, arg_info(op->args[1])->val,
-                                          arg_info(op->args[2])->val);
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            if (arg_is_const(op->args[1])) {
@@ -1637,9 +1711,71 @@ void tcg_optimize(TCGContext *s)
            }
            break;
+        default:
+            break;
+
+        /* ---------------------------------------------------------- */
+        /* Sorted alphabetically by opcode as much as possible. */
+
+        CASE_OP_32_64_VEC(add):
+            done = fold_add(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(and):
+            done = fold_and(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(andc):
+            done = fold_andc(&ctx, op);
+            break;
+        CASE_OP_32_64(ctpop):
+            done = fold_ctpop(&ctx, op);
+            break;
+        CASE_OP_32_64(div):
+        CASE_OP_32_64(divu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(eqv):
+            done = fold_eqv(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8s):
+        CASE_OP_32_64(ext16s):
+        case INDEX_op_ext32s_i64:
+        case INDEX_op_ext_i32_i64:
+            done = fold_exts(&ctx, op);
+            break;
+        CASE_OP_32_64(ext8u):
+        CASE_OP_32_64(ext16u):
+        case INDEX_op_ext32u_i64:
+        case INDEX_op_extu_i32_i64:
+        case INDEX_op_extrl_i64_i32:
+        case INDEX_op_extrh_i64_i32:
+            done = fold_extu(&ctx, op);
+            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
+        CASE_OP_32_64(mul):
+        CASE_OP_32_64(mulsh):
+        CASE_OP_32_64(muluh):
+            done = fold_multiply(&ctx, op);
+            break;
+        CASE_OP_32_64(nand):
+            done = fold_nand(&ctx, op);
+            break;
+        CASE_OP_32_64(neg):
+            done = fold_neg(&ctx, op);
+            break;
+        CASE_OP_32_64(nor):
+            done = fold_nor(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(not):
+            done = fold_not(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(or):
+            done = fold_or(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(orc):
+            done = fold_orc(&ctx, op);
+            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
@@ -1649,8 +1785,22 @@ void tcg_optimize(TCGContext *s)
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
-
-        default:
+        CASE_OP_32_64(rem):
+        CASE_OP_32_64(remu):
+            done = fold_const2(&ctx, op);
+            break;
+        CASE_OP_32_64(rotl):
+        CASE_OP_32_64(rotr):
+        CASE_OP_32_64(sar):
+        CASE_OP_32_64(shl):
+        CASE_OP_32_64(shr):
+            done = fold_shift(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(sub):
+            done = fold_sub(&ctx, op);
+            break;
+        CASE_OP_32_64_VEC(xor):
+            done = fold_xor(&ctx, op);
            break;
        }
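[Note: each converted case above only sets done and breaks; opcodes not yet converted fall into the relocated default: break; and presumably leave done at its initial false value, so that — as with the fold_mb and fold_qemu_* cases that predate this patch — the generic finish_folding() path still runs for them. The standalone sketch below is not QEMU code; fold_known, finish_generic and the opcode names are invented to show the done-flag dispatch shape in isolation.]

/* Standalone sketch of the done-flag dispatch; not QEMU code. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { OPC_ADD, OPC_LOAD, OPC_UNHANDLED };

/* A folder that always handles its op (constant inputs assumed known). */
static bool fold_known(int *folded)
{
    (*folded)++;
    return true;
}

/* Fallback processing, standing in for a generic finish step. */
static void finish_generic(int *finished)
{
    (*finished)++;
}

int main(void)
{
    const int ops[] = { OPC_ADD, OPC_LOAD, OPC_UNHANDLED };
    int folded = 0, finished = 0;

    for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
        bool done = false;

        switch (ops[i]) {
        case OPC_ADD:
            done = fold_known(&folded);
            break;
        default:
            /* Not yet converted: leave done == false. */
            break;
        }

        if (!done) {
            finish_generic(&finished);
        }
    }
    printf("folded=%d finished=%d\n", folded, finished);
    return 0;
}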