[30/48] tcg/optimize: Split out fold_xx_to_i

Message ID 20210830062451.639572-31-richard.henderson@linaro.org
State Superseded
Series tcg: optimize redundant sign extensions

Commit Message

Richard Henderson Aug. 30, 2021, 6:24 a.m. UTC
Pull the "op r, a, a => movi r, 0" optimization into a function,
and use it in the outer opcode fold functions.
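(Not part of the patch, only an illustrative sanity check of the identity being
exploited: any value and-complemented against, subtracted from, or xored with
itself is zero, which is why "op r, a, a" can become "movi r, 0" for these
opcodes. A minimal plain-C check of those identities would be:)

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0xdeadbeefcafef00dull;

        assert((x & ~x) == 0);   /* andc a, a */
        assert((x - x)  == 0);   /* sub  a, a */
        assert((x ^ x)  == 0);   /* xor  a, a */
        return 0;
    }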

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

-- 
2.25.1

Comments

Philippe Mathieu-Daudé Aug. 30, 2021, 9:56 p.m. UTC | #1
On 8/30/21 8:24 AM, Richard Henderson wrote:
> Pull the "op r, a, a => movi r, 0" optimization into a function,
> and use it in the outer opcode fold functions.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  tcg/optimize.c | 41 ++++++++++++++++++++++++-----------------
>  1 file changed, 24 insertions(+), 17 deletions(-)

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>

Patch

diff --git a/tcg/optimize.c b/tcg/optimize.c
index ee674fe623..f48d0bc52d 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -695,6 +695,15 @@  static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/* If the binary operation has both arguments equal, fold to @i. */
+static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (args_are_copies(op->args[1], op->args[2])) {
+        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
+    }
+    return false;
+}
+
 /*
  * These outermost fold_<op> functions are sorted alphabetically.
  */
@@ -744,7 +753,11 @@  static bool fold_and(OptContext *ctx, TCGOp *op)
 
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xx_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_brcond(OptContext *ctx, TCGOp *op)
@@ -1211,7 +1224,11 @@  static bool fold_shift(OptContext *ctx, TCGOp *op)
 
 static bool fold_sub(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xx_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
@@ -1221,7 +1238,11 @@  static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xx_to_i(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 /* Propagate constants and copies, fold constant expressions. */
@@ -1726,20 +1747,6 @@  void tcg_optimize(TCGContext *s)
             break;
         }
 
-        /* Simplify expression for "op r, a, a => movi r, 0" cases */
-        switch (opc) {
-        CASE_OP_32_64_VEC(andc):
-        CASE_OP_32_64_VEC(sub):
-        CASE_OP_32_64_VEC(xor):
-            if (args_are_copies(op->args[1], op->args[2])) {
-                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
-                continue;
-            }
-            break;
-        default:
-            break;
-        }
-
         /*
          * Process each opcode.
          * Sorted alphabetically by opcode as much as possible.
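For context on how these helpers compose (an editorial note, not taken from the
patch beyond the names it already uses): each fold_* helper returns true once
the op has been fully rewritten, here via tcg_opt_gen_movi, which is what lets
the outer functions chain the generic constant fold and the new xx-to-i fold
with a short-circuit ||, falling back to generic processing when both decline.
A rough standalone sketch of that control flow, with stand-in types that do not
reflect the real definitions in tcg/optimize.c:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { int unused; } OptContext;        /* stand-in only */
    typedef struct { uint64_t args[3]; } TCGOp;       /* stand-in only */

    static bool args_are_copies(uint64_t a, uint64_t b) { return a == b; }

    /* Pretend neither source operand is a known constant. */
    static bool fold_const2(OptContext *ctx, TCGOp *op) { return false; }

    /* Pretend to replace the op with "movi dst, i"; report it as handled. */
    static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                                 uint64_t dst, uint64_t i)
    {
        printf("movi r%u, %u\n", (unsigned)dst, (unsigned)i);
        return true;
    }

    static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
    {
        if (args_are_copies(op->args[1], op->args[2])) {
            return tcg_opt_gen_movi(ctx, op, op->args[0], i);
        }
        return false;
    }

    static bool fold_sub(OptContext *ctx, TCGOp *op)
    {
        if (fold_const2(ctx, op) ||
            fold_xx_to_i(ctx, op, 0)) {
            return true;     /* fully handled; skip generic processing */
        }
        return false;        /* fall back to generic processing */
    }

    int main(void)
    {
        OptContext ctx = { 0 };
        TCGOp op = { .args = { 0, 5, 5 } };   /* sub r0, r5, r5 */
        printf("handled: %d\n", fold_sub(&ctx, &op));
        return 0;
    }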