@@ -2950,6 +2950,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_shrv_vec:
     case INDEX_op_sarv_vec:
     case INDEX_op_rotli_vec:
+    case INDEX_op_rotlv_vec:
+    case INDEX_op_rotrv_vec:
         return -1;
     default:
         return 0;
@@ -2960,7 +2962,7 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
     va_list va;
-    TCGv_vec v0, v1, v2, t1;
+    TCGv_vec v0, v1, v2, t1, t2, t3;
     TCGArg a2;
 
     va_start(va, a0);
@@ -3003,6 +3005,35 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
         tcg_temp_free_vec(t1);
         break;
 
+    case INDEX_op_rotlv_vec:
+        t1 = tcg_temp_new_vec(type);
+        t2 = tcg_constant_vec(type, vece, 8 << vece);
+        tcg_gen_sub_vec(vece, t1, v2, t2);
+        /* Right shifts are negative left shifts for NEON. */
+        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
+        tcg_gen_or_vec(vece, v0, v0, t1);
+        tcg_temp_free_vec(t1);
+        break;
+
+    case INDEX_op_rotrv_vec:
+        t1 = tcg_temp_new_vec(type);
+        t2 = tcg_temp_new_vec(type);
+        t3 = tcg_constant_vec(type, vece, 8 << vece);
+        tcg_gen_neg_vec(vece, t1, v2);
+        tcg_gen_sub_vec(vece, t2, t3, v2);
+        /* Right shifts are negative left shifts for NEON. */
+        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t1));
+        vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
+                  tcgv_vec_arg(v1), tcgv_vec_arg(t2));
+        tcg_gen_or_vec(vece, v0, t1, t2);
+        tcg_temp_free_vec(t1);
+        tcg_temp_free_vec(t2);
+        break;
+
     default:
         g_assert_not_reached();
     }
Implement via expansion, so don't actually set TCG_TARGET_HAS_rotv_vec.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/arm/tcg-target.c.inc | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

-- 
2.25.1
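
As a cross-check of the algebra behind the two expansions above, here is a small scalar model; it is illustrative only and not part of the patch. The helper ushl32() is a hypothetical stand-in for one unsigned 32-bit lane of the VSHL.U behaviour that INDEX_op_arm_ushl_vec relies on: positive counts shift left, negative counts shift right logically, and any count whose magnitude reaches the lane width yields zero. It assumes the rotate count is already in the range [0, 31], i.e. the 32-bit case where 8 << vece == 32.

/* rot_expand_model.c -- standalone sanity check for the rotlv/rotrv
 * expansion above; illustrative only, not part of the patch.
 * Models a single 32-bit lane. */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar stand-in for one unsigned lane of VSHL.U:
 * positive counts shift left, negative counts shift right (logically),
 * counts whose magnitude reaches the lane width produce zero. */
static uint32_t ushl32(uint32_t x, int count)
{
    if (count >= 32 || count <= -32) {
        return 0;
    }
    return count >= 0 ? x << count : x >> -count;
}

/* rotlv expansion: (x ushl c) | (x ushl (c - 32)). */
static uint32_t rotlv32(uint32_t x, unsigned c)
{
    return ushl32(x, (int)c) | ushl32(x, (int)c - 32);
}

/* rotrv expansion: (x ushl -c) | (x ushl (32 - c)). */
static uint32_t rotrv32(uint32_t x, unsigned c)
{
    return ushl32(x, -(int)c) | ushl32(x, 32 - (int)c);
}

int main(void)
{
    uint32_t x = 0x80000001u;

    for (unsigned c = 0; c < 32; c++) {
        /* Reference rotates, written to avoid an undefined shift by 32. */
        uint32_t ref_l = (x << c) | (c ? x >> (32 - c) : 0);
        uint32_t ref_r = (x >> c) | (c ? x << (32 - c) : 0);

        assert(rotlv32(x, c) == ref_l);
        assert(rotrv32(x, c) == ref_r);
    }
    printf("rotlv/rotrv expansion model: OK\n");
    return 0;
}

If this reading is right, OR-ing the two shift results reproduces the rotate even for a count of zero, because the companion count (-32 or +32) then falls out of range and contributes zero rather than an undefined value, so the expansion needs no explicit masking of the count.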