| Message ID | 20211029043329.1518029-29-richard.henderson@linaro.org |
| --- | --- |
| State | Accepted |
| Commit | dcd08996c9420a0d22399e0cc53117d2043a02bb |
| Series | tcg patch queue |
On Fri, 29 Oct 2021 at 05:59, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
>  1 file changed, 22 insertions(+), 17 deletions(-)
>
> diff --git a/tcg/optimize.c b/tcg/optimize.c
> index 110b3d1cc2..faedbdbfb8 100644
> --- a/tcg/optimize.c
> +++ b/tcg/optimize.c
> @@ -888,6 +888,25 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
>      return fold_const2(ctx, op);
>  }

Hi; Coverity warns about a shift in here (CID 1465220):

>
> +static bool fold_extract2(OptContext *ctx, TCGOp *op)
> +{
> +    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
> +        uint64_t v1 = arg_info(op->args[1])->val;
> +        uint64_t v2 = arg_info(op->args[2])->val;
> +        int shr = op->args[3];
> +
> +        if (op->opc == INDEX_op_extract2_i64) {
> +            v1 >>= shr;
> +            v2 <<= 64 - shr;
> +        } else {
> +            v1 = (uint32_t)v1 >> shr;
> +            v2 = (int32_t)v2 << (32 - shr);

Here we do the shift at 32 bits and then assign it into a 64-bit
variable, which triggers Coverity's usual OVERFLOW_BEFORE_WIDEN
heuristic. Is the 32-bitness intentional?

> +        }
> +        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
> +    }
> +    return false;
> +}

-- PMM
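For context, the heuristic fires because a shift performed at 32 bits discards any bits shifted past bit 31 before the result is widened to 64 bits; whether that truncation is intended for the 32-bit opcode is exactly what is being asked here. A minimal standalone sketch of the flagged pattern (illustrative values only, not QEMU code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t v2 = 0x00000000ffff0000ull;
    int shr = 8;

    /* Shift done at 32 bits, widened afterwards: bits shifted past
     * bit 31 are already gone by the time the value becomes 64-bit. */
    uint64_t narrow = (uint32_t)v2 << (32 - shr);

    /* The same shift done at 64 bits keeps those bits. */
    uint64_t wide = v2 << (32 - shr);

    printf("narrow = %016" PRIx64 "\n", narrow); /* 0000000000000000 */
    printf("wide   = %016" PRIx64 "\n", wide);   /* 00ffff0000000000 */
    return 0;
}
```

In fold_extract2 the operands of the i32 opcode are 32-bit values to begin with, and the `(int32_t)` cast additionally makes the implicit widening sign-extend the 32-bit result.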
On 11/9/21 5:52 PM, Peter Maydell wrote:
> On Fri, 29 Oct 2021 at 05:59, Richard Henderson
> <richard.henderson@linaro.org> wrote:
>>
>> Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
>> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  tcg/optimize.c | 39 ++++++++++++++++++++++-----------------
>>  1 file changed, 22 insertions(+), 17 deletions(-)
>>
>> diff --git a/tcg/optimize.c b/tcg/optimize.c
>> index 110b3d1cc2..faedbdbfb8 100644
>> --- a/tcg/optimize.c
>> +++ b/tcg/optimize.c
>> @@ -888,6 +888,25 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
>>      return fold_const2(ctx, op);
>>  }
>
> Hi; Coverity warns about a shift in here (CID 1465220):
>
>>
>> +static bool fold_extract2(OptContext *ctx, TCGOp *op)
>> +{
>> +    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
>> +        uint64_t v1 = arg_info(op->args[1])->val;
>> +        uint64_t v2 = arg_info(op->args[2])->val;
>> +        int shr = op->args[3];
>> +
>> +        if (op->opc == INDEX_op_extract2_i64) {
>> +            v1 >>= shr;
>> +            v2 <<= 64 - shr;
>> +        } else {
>> +            v1 = (uint32_t)v1 >> shr;
>> +            v2 = (int32_t)v2 << (32 - shr);
>
> Here we do the shift at 32 bits and then assign it into a 64-bit
> variable, which triggers Coverity's usual OVERFLOW_BEFORE_WIDEN
> heuristic. Is the 32-bitness intentional?

Yep.  I'll add another cast.

r~
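One plausible shape for that extra cast, shown here as a hypothetical sketch rather than the exact follow-up patch that landed, is to widen the intentionally 32-bit result explicitly, which documents the intent and satisfies the OVERFLOW_BEFORE_WIDEN heuristic:

```c
/* Before (flagged by Coverity): the widening to uint64_t is implicit. */
v2 = (int32_t)v2 << (32 - shr);

/* After (hypothetical): the explicit cast marks the 32-bit computation,
 * and its sign-extending widening, as deliberate. */
v2 = (uint64_t)((int32_t)v2 << (32 - shr));
```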
```diff
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 110b3d1cc2..faedbdbfb8 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -888,6 +888,25 @@ static bool fold_eqv(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_extract2(OptContext *ctx, TCGOp *op)
+{
+    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
+        uint64_t v1 = arg_info(op->args[1])->val;
+        uint64_t v2 = arg_info(op->args[2])->val;
+        int shr = op->args[3];
+
+        if (op->opc == INDEX_op_extract2_i64) {
+            v1 >>= shr;
+            v2 <<= 64 - shr;
+        } else {
+            v1 = (uint32_t)v1 >> shr;
+            v2 = (int32_t)v2 << (32 - shr);
+        }
+        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
+    }
+    return false;
+}
+
 static bool fold_exts(OptContext *ctx, TCGOp *op)
 {
     return fold_const1(ctx, op);
@@ -1726,23 +1745,6 @@ void tcg_optimize(TCGContext *s)
             }
             break;
 
-        CASE_OP_32_64(extract2):
-            if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
-                uint64_t v1 = arg_info(op->args[1])->val;
-                uint64_t v2 = arg_info(op->args[2])->val;
-                int shr = op->args[3];
-
-                if (opc == INDEX_op_extract2_i64) {
-                    tmp = (v1 >> shr) | (v2 << (64 - shr));
-                } else {
-                    tmp = (int32_t)(((uint32_t)v1 >> shr) |
-                                    ((uint32_t)v2 << (32 - shr)));
-                }
-                tcg_opt_gen_movi(&ctx, op, op->args[0], tmp);
-                continue;
-            }
-            break;
-
         default:
             break;
 
@@ -1777,6 +1779,9 @@ void tcg_optimize(TCGContext *s)
         CASE_OP_32_64(eqv):
             done = fold_eqv(&ctx, op);
             break;
+        CASE_OP_32_64(extract2):
+            done = fold_extract2(&ctx, op);
+            break;
         CASE_OP_32_64(ext8s):
         CASE_OP_32_64(ext16s):
         case INDEX_op_ext32s_i64:
```
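For readers unfamiliar with the opcode being folded: extract2 returns 64 (or 32) bits starting at bit `shr` of the concatenation of its two source operands, with `args[2]` supplying the high part. A standalone model of the 64-bit case (illustrative names and guard, not the QEMU helper itself):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Standalone model of the 64-bit constant fold above: return 64 bits
 * starting at bit shr of the 128-bit concatenation v2:v1.  The shr == 0
 * guard keeps the shift count in range here; illustrative only. */
static uint64_t extract2_i64(uint64_t v1, uint64_t v2, int shr)
{
    return shr ? (v1 >> shr) | (v2 << (64 - shr)) : v1;
}

int main(void)
{
    /* Drop the low byte of v1; the low byte of v2 (0x00) becomes the
     * new top byte of the result. */
    uint64_t r = extract2_i64(0x1122334455667788ull,
                              0x99aabbccddeeff00ull, 8);
    printf("%016" PRIx64 "\n", r); /* prints 0011223344556677 */
    return 0;
}
```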