@@ -126,7 +126,11 @@ enum function_shape {
SHAPE_shift_right_imm,
/* sv<t0>_t svfoo_wide[_t0](sv<t0>_t, svuint64_t). */
- SHAPE_binary_wide
+ SHAPE_binary_wide,
+
+ /* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
+     sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t). */
+ SHAPE_shift_opt_n
};
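For reference, SHAPE_shift_opt_n follows the usual ACLE convention of pairing an overloadable vector-by-vector form with an explicit _n form whose shift amount is a uint64_t. A minimal sketch of how the two resolve for svlsl (illustrative only; the full set of suffixes depends on the type list and predication named in the .def entry):

    #include <arm_sve.h>

    svint16_t
    example (svbool_t pg, svint16_t x, svint16_t amounts)
    {
      /* Resolves to svlsl_s16_m: per-lane vector shift amounts.  */
      svint16_t a = svlsl_m (pg, x, amounts);
      /* Resolves to svlsl_n_s16_m: a single uint64_t shift amount.  */
      svint16_t b = svlsl_m (pg, a, 3);
      return b;
    }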
/* Classifies an operation into "modes"; for example, to distinguish
@@ -172,6 +176,7 @@ enum function {
FUNC_svdup,
FUNC_sveor,
FUNC_svindex,
+ FUNC_svlsl,
FUNC_svlsl_wide,
FUNC_svmax,
FUNC_svmad,
@@ -479,6 +484,7 @@ private:
rtx expand_dup ();
rtx expand_eor ();
rtx expand_index ();
+ rtx expand_lsl ();
rtx expand_lsl_wide ();
rtx expand_max ();
rtx expand_min ();
@@ -912,6 +918,12 @@ arm_sve_h_builder::build (const function_group &group)
add_overloaded_functions (group, MODE_none);
build_all (&arm_sve_h_builder::sig_00i, group, MODE_none);
break;
+
+ case SHAPE_shift_opt_n:
+ add_overloaded_functions (group, MODE_none);
+ build_all (&arm_sve_h_builder::sig_000, group, MODE_none);
+ build_all (&arm_sve_h_builder::sig_n_00i, group, MODE_n);
+ break;
}
}
@@ -1222,6 +1234,7 @@ arm_sve_h_builder::get_attributes (const function_instance &instance)
case FUNC_svdup:
case FUNC_sveor:
case FUNC_svindex:
+ case FUNC_svlsl:
case FUNC_svlsl_wide:
case FUNC_svmax:
case FUNC_svmad:
@@ -1280,6 +1293,7 @@ arm_sve_h_builder::get_explicit_types (function_shape shape)
case SHAPE_ternary_qq_opt_n:
case SHAPE_shift_right_imm:
case SHAPE_binary_wide:
+ case SHAPE_shift_opt_n:
return 0;
}
gcc_unreachable ();
@@ -1347,6 +1361,7 @@ function_resolver::resolve ()
case SHAPE_unary:
return resolve_uniform (1);
case SHAPE_binary_opt_n:
+ case SHAPE_shift_opt_n:
return resolve_uniform (2);
case SHAPE_ternary_opt_n:
return resolve_uniform (3);
@@ -1706,6 +1721,7 @@ function_checker::check ()
case SHAPE_ternary_opt_n:
case SHAPE_ternary_qq_opt_n:
case SHAPE_binary_wide:
+ case SHAPE_shift_opt_n:
return true;
}
gcc_unreachable ();
@@ -1895,6 +1911,7 @@ gimple_folder::fold ()
case FUNC_svdup:
case FUNC_sveor:
case FUNC_svindex:
+ case FUNC_svlsl:
case FUNC_svlsl_wide:
case FUNC_svmax:
case FUNC_svmad:
@@ -2001,6 +2018,9 @@ function_expander::expand ()
case FUNC_svindex:
return expand_index ();
+ case FUNC_svlsl:
+ return expand_lsl ();
+
case FUNC_svlsl_wide:
return expand_lsl_wide ();
@@ -2175,6 +2195,30 @@ function_expander::expand_index ()
return expand_via_unpred_direct_optab (vec_series_optab);
}
+/* Expand a call to svlsl. */
+rtx
+function_expander::expand_lsl ()
+{
+ machine_mode mode = get_mode (0);
+ machine_mode elem_mode = GET_MODE_INNER (mode);
+
+ if (m_fi.mode == MODE_n
+ && mode != VNx2DImode
+ && !aarch64_simd_shift_imm_p (m_args[2], elem_mode, true))
+ return expand_lsl_wide ();
+
+ if (m_fi.pred == PRED_x)
+ {
+ insn_code icode = code_for_aarch64_pred (ASHIFT, mode);
+ return expand_via_pred_x_insn (icode);
+ }
+ else
+ {
+ insn_code icode = code_for_cond (ASHIFT, mode);
+ return expand_via_pred_insn (icode);
+ }
+}
+
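As a rough illustration of what expand_lsl above is aiming for (the actual output depends on register allocation and predication; this sketch assumes the behaviour exercised by the new tests), the _n forms for non-64-bit elements split into an immediate case and a wide case:

    #include <arm_sve.h>
    #include <stdint.h>

    svint16_t
    lsl_imm_in_range (svbool_t pg, svint16_t x)
    {
      /* 1 is within 0..15, so this is expected to use the immediate form.  */
      return svlsl_n_s16_x (pg, x, 1);
    }

    svint16_t
    lsl_imm_out_of_range (svbool_t pg, svint16_t x)
    {
      /* 16 is outside 0..15, so this falls back to expand_lsl_wide and
	 shifts by a duplicated .d vector.  */
      return svlsl_n_s16_x (pg, x, 16);
    }

    svint16_t
    lsl_variable (svbool_t pg, svint16_t x, uint64_t n)
    {
      /* Variable amounts are also routed through the wide form.  */
      return svlsl_n_s16_x (pg, x, n);
    }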
/* Expand a call to svlsl_wide. */
rtx
function_expander::expand_lsl_wide ()
@@ -71,6 +71,7 @@ DEF_SVE_FUNCTION (svdot, ternary_qq_opt_n, sdi, none)
DEF_SVE_FUNCTION (svdup, unary_n, all_data, mxznone)
DEF_SVE_FUNCTION (sveor, binary_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svindex, binary_scalar, all_data, none)
+DEF_SVE_FUNCTION (svlsl, shift_opt_n, all_integer, mxz)
DEF_SVE_FUNCTION (svlsl_wide, binary_wide, all_bhsi, mxz)
DEF_SVE_FUNCTION (svmax, binary_opt_n, all_data, mxz)
DEF_SVE_FUNCTION (svmin, binary_opt_n, all_data, mxz)
@@ -1510,20 +1510,21 @@
;; actually need the predicate for the first alternative, but using Upa
;; or X isn't likely to gain much and would make the instruction seem
;; less uniform to the register allocator.
-(define_insn "*v<optab><mode>3"
- [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w")
+(define_insn "@aarch64_pred_<optab><mode>"
+ [(set (match_operand:SVE_I 0 "register_operand" "=w, w, w, ?&w")
(unspec:SVE_I
- [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
(ASHIFT:SVE_I
- (match_operand:SVE_I 2 "register_operand" "w, 0, w")
- (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, w"))]
+ (match_operand:SVE_I 2 "register_operand" "w, 0, w, w")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, 0, w"))]
UNSPEC_MERGE_PTRUE))]
"TARGET_SVE"
"@
<shift>\t%0.<Vetype>, %2.<Vetype>, #%3
<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ <shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
- [(set_attr "movprfx" "*,*,yes")]
+ [(set_attr "movprfx" "*,*,*,yes")]
)
;; LSL, LSR and ASR by a scalar, which expands into one of the vector
@@ -1553,6 +1554,53 @@
}
)
+(define_expand "@cond_<optab><mode>"
+ [(set (match_operand:SVE_I 0 "register_operand")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand")
+ (ASHIFT:SVE_I
+ (match_operand:SVE_I 2 "register_operand")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand"))
+ (match_operand:SVE_I 4 "aarch64_simd_reg_or_zero")]
+ UNSPEC_SEL))]
+ "TARGET_SVE"
+)
+
+(define_insn "*cond<optab><mode>_m"
+ [(set (match_operand:SVE_I 0 "register_operand" "=w, w, ?&w, ?&w")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl, Upl")
+ (ASHIFT:SVE_I
+ (match_operand:SVE_I 2 "register_operand" "0, 0, w, w")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, w, D<lr>, w"))
+ (match_dup 2)]
+ UNSPEC_SEL))]
+ "TARGET_SVE"
+ "@
+ <shift>\t%0.<Vetype>, %1/m, %2.<Vetype>, #%3
+ <shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>
+ movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ movprfx\t%0, %2\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ [(set_attr "movprfx" "*,*,yes,yes")]
+)
+
+(define_insn "*cond<optab><mode>_z"
+ [(set (match_operand:SVE_I 0 "register_operand" "=w, &w, ?&w")
+ (unspec:SVE_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upl, Upl, Upl")
+ (ASHIFT:SVE_I
+ (match_operand:SVE_I 2 "register_operand" "w, w, 0w")
+ (match_operand:SVE_I 3 "aarch64_sve_<lr>shift_operand" "D<lr>, 0, w"))
+ (match_operand:SVE_I 4 "aarch64_simd_imm_zero")]
+ UNSPEC_SEL))]
+ "TARGET_SVE"
+ "@
+ movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, #%3
+ movprfx\t%0.<Vetype>, %1/z, %3.<Vetype>\;<shift>r\t%0.<Vetype>, %1/m, %3.<Vetype>, %2.<Vetype>
+ movprfx\t%0.<Vetype>, %1/z, %2.<Vetype>\;<shift>\t%0.<Vetype>, %1/m, %0.<Vetype>, %3.<Vetype>"
+ [(set_attr "movprfx" "yes,yes,yes")]
+)
+
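The _m and _z insns above back the merging and zeroing predication of the intrinsic. A small usage sketch of the difference (illustrative only, not taken from the testsuite):

    #include <arm_sve.h>

    svint32_t
    merge_vs_zero (svbool_t pg, svint32_t x, svint32_t amounts)
    {
      /* Inactive lanes keep the corresponding lane of x.  */
      svint32_t m = svlsl_s32_m (pg, x, amounts);
      /* Inactive lanes are set to zero.  */
      svint32_t z = svlsl_s32_z (pg, x, amounts);
      return svadd_x (pg, m, z);
    }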
(define_insn "@aarch64_pred_<sve_int_op><mode>"
[(set (match_operand:SVE_BHSI 0 "register_operand" "=w, ?&w")
(unspec:SVE_BHSI
@@ -13454,11 +13454,14 @@ aarch64_check_zero_based_sve_index_immediate (rtx x)
bool
aarch64_simd_shift_imm_p (rtx x, machine_mode mode, bool left)
{
+ x = unwrap_const_vec_duplicate (x);
+ if (!CONST_INT_P (x))
+ return false;
int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
if (left)
- return aarch64_const_vec_all_same_in_range_p (x, 0, bit_width - 1);
+ return IN_RANGE (INTVAL (x), 0, bit_width - 1);
else
- return aarch64_const_vec_all_same_in_range_p (x, 1, bit_width);
+ return IN_RANGE (INTVAL (x), 1, bit_width);
}
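The accepted ranges are the architectural ones: left shifts by 0 to bits-1 are encodable as immediates, right shifts by 1 to bits. A hypothetical stand-alone restatement of the same range test (the helper name is mine, not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper mirroring the range check above.  */
    static inline bool
    shift_imm_in_range_p (int64_t amount, int elt_bits, bool left)
    {
      return left ? (amount >= 0 && amount <= elt_bits - 1)
		  : (amount >= 1 && amount <= elt_bits);
    }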
/* Return the bitmask CONST_INT to select the bits required by a zero extract
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_s16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_m_tied1, svint16_t,
+ z0 = svlsl_s16_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_s16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_m_untied, svint16_t,
+ z0 = svlsl_s16_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_s16_m_tied2, svint16_t,
+ z1 = svlsl_s16_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_s16_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_m_tied1, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_s16_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_m_untied, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_s16_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_m_tied1, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_s16_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_m_untied, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_s16_m_tied1:
+** sel z0\.h, p0, z0\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s16_m_tied1, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_s16_m_untied:
+** sel z0\.h, p0, z1\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s16_m_untied, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_s16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_m_tied1, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_s16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_m_untied, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_s16_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_m_tied1, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_s16_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_m_untied, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_15_s16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_m_tied1, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z0, 15),
+ z0 = svlsl_m (p0, z0, 15))
+
+/*
+** lsl_15_s16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_m_untied, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z1, 15),
+ z0 = svlsl_m (p0, z1, 15))
+
+/*
+** lsl_16_s16_m_tied1:
+** mov (z[0-9]+\.d), #16
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_m_tied1, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z0, 16),
+ z0 = svlsl_m (p0, z0, 16))
+
+/*
+** lsl_16_s16_m_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_m_untied, svint16_t,
+ z0 = svlsl_n_s16_m (p0, z1, 16),
+ z0 = svlsl_m (p0, z1, 16))
+
+/*
+** lsl_s16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_z_tied1, svint16_t,
+ z0 = svlsl_s16_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_s16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_z_untied, svint16_t,
+ z0 = svlsl_s16_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_s16_z_tied2:
+** movprfx z0\.h, p0/z, z0\.h
+** lslr z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_z_tied2, svint16_t,
+ z0 = svlsl_s16_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_s16_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_z_tied1, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_s16_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_z_untied, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_s16_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.h, p0/z, z1\.h
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_z_tied1, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_s16_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.h, p0/z, z2\.h
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_z_untied, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_s16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s16_z_tied1, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_s16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s16_z_untied, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_s16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_z_tied1, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_s16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_z_untied, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_s16_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_z_tied1, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_s16_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_z_untied, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_15_s16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_z_tied1, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z0, 15),
+ z0 = svlsl_z (p0, z0, 15))
+
+/*
+** lsl_15_s16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_z_untied, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z1, 15),
+ z0 = svlsl_z (p0, z1, 15))
+
+/*
+** lsl_16_s16_z_tied1:
+** mov (z[0-9]+\.d), #16
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_z_tied1, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z0, 16),
+ z0 = svlsl_z (p0, z0, 16))
+
+/*
+** lsl_16_s16_z_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_z_untied, svint16_t,
+ z0 = svlsl_n_s16_z (p0, z1, 16),
+ z0 = svlsl_z (p0, z1, 16))
+
+/*
+** lsl_s16_x_tied1:
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_x_tied1, svint16_t,
+ z0 = svlsl_s16_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_s16_x_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_x_untied, svint16_t,
+		z0 = svlsl_s16_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_s16_x_tied2:
+** lslr z1\.h, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s16_x_tied2, svint16_t,
+ z1 = svlsl_s16_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_s16_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_x_tied1, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_s16_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s16_x_untied, svint16_t, uint64_t,
+ z0 = svlsl_n_s16_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_s16_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_x_tied1, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_s16_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s16_x_untied, svint16_t, uint64_t,
+ z1 = svlsl_n_s16_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_s16_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s16_x_tied1, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_s16_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s16_x_untied, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_s16_x_tied1:
+** lsl z0\.h, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_x_tied1, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_s16_x_untied:
+** lsl z0\.h, z1\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s16_x_untied, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_s16_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_x_tied1, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_s16_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s16_x_untied, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_15_s16_x_tied1:
+** lsl z0\.h, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_x_tied1, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z0, 15),
+ z0 = svlsl_x (p0, z0, 15))
+
+/*
+** lsl_15_s16_x_untied:
+** lsl z0\.h, z1\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_s16_x_untied, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z1, 15),
+ z0 = svlsl_x (p0, z1, 15))
+
+/*
+** lsl_16_s16_x_tied1:
+** mov (z[0-9]+\.d), #16
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_x_tied1, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z0, 16),
+ z0 = svlsl_x (p0, z0, 16))
+
+/*
+** lsl_16_s16_x_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_s16_x_untied, svint16_t,
+ z0 = svlsl_n_s16_x (p0, z1, 16),
+ z0 = svlsl_x (p0, z1, 16))
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_s32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_m_tied1, svint32_t,
+ z0 = svlsl_s32_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_s32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_m_untied, svint32_t,
+ z0 = svlsl_s32_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_s32_m_tied2, svint32_t,
+ z1 = svlsl_s32_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_s32_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_m_tied1, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_s32_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_m_untied, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_s32_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_m_tied1, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_s32_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_m_untied, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_s32_m_tied1:
+** sel z0\.s, p0, z0\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s32_m_tied1, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_s32_m_untied:
+** sel z0\.s, p0, z1\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s32_m_untied, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_s32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_m_tied1, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_s32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_m_untied, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_s32_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_m_tied1, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_s32_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_m_untied, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_31_s32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_m_tied1, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z0, 31),
+ z0 = svlsl_m (p0, z0, 31))
+
+/*
+** lsl_31_s32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_m_untied, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z1, 31),
+ z0 = svlsl_m (p0, z1, 31))
+
+/*
+** lsl_32_s32_m_tied1:
+** mov (z[0-9]+\.d), #32
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_m_tied1, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z0, 32),
+ z0 = svlsl_m (p0, z0, 32))
+
+/*
+** lsl_32_s32_m_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_m_untied, svint32_t,
+ z0 = svlsl_n_s32_m (p0, z1, 32),
+ z0 = svlsl_m (p0, z1, 32))
+
+/*
+** lsl_s32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_z_tied1, svint32_t,
+ z0 = svlsl_s32_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_s32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_z_untied, svint32_t,
+ z0 = svlsl_s32_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_s32_z_tied2:
+** movprfx z0\.s, p0/z, z0\.s
+** lslr z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_z_tied2, svint32_t,
+ z0 = svlsl_s32_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_s32_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_z_tied1, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_s32_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_z_untied, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_s32_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.s, p0/z, z1\.s
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_z_tied1, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_s32_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.s, p0/z, z2\.s
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_z_untied, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_s32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s32_z_tied1, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_s32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s32_z_untied, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_s32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_z_tied1, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_s32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_z_untied, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_s32_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_z_tied1, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_s32_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_z_untied, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_31_s32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_z_tied1, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z0, 31),
+ z0 = svlsl_z (p0, z0, 31))
+
+/*
+** lsl_31_s32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_z_untied, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z1, 31),
+ z0 = svlsl_z (p0, z1, 31))
+
+/*
+** lsl_32_s32_z_tied1:
+** mov (z[0-9]+\.d), #32
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_z_tied1, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z0, 32),
+ z0 = svlsl_z (p0, z0, 32))
+
+/*
+** lsl_32_s32_z_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_z_untied, svint32_t,
+ z0 = svlsl_n_s32_z (p0, z1, 32),
+ z0 = svlsl_z (p0, z1, 32))
+
+/*
+** lsl_s32_x_tied1:
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_x_tied1, svint32_t,
+ z0 = svlsl_s32_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_s32_x_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_x_untied, svint32_t,
+		z0 = svlsl_s32_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_s32_x_tied2:
+** lslr z1\.s, p0/m, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s32_x_tied2, svint32_t,
+ z1 = svlsl_s32_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_s32_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_x_tied1, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_s32_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s32_x_untied, svint32_t, uint64_t,
+ z0 = svlsl_n_s32_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_s32_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_x_tied1, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_s32_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s32_x_untied, svint32_t, uint64_t,
+ z1 = svlsl_n_s32_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_s32_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s32_x_tied1, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_s32_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s32_x_untied, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_s32_x_tied1:
+** lsl z0\.s, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_x_tied1, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_s32_x_untied:
+** lsl z0\.s, z1\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s32_x_untied, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_s32_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_x_tied1, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_s32_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s32_x_untied, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_31_s32_x_tied1:
+** lsl z0\.s, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_x_tied1, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z0, 31),
+ z0 = svlsl_x (p0, z0, 31))
+
+/*
+** lsl_31_s32_x_untied:
+** lsl z0\.s, z1\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_s32_x_untied, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z1, 31),
+ z0 = svlsl_x (p0, z1, 31))
+
+/*
+** lsl_32_s32_x_tied1:
+** mov (z[0-9]+\.d), #32
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_x_tied1, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z0, 32),
+ z0 = svlsl_x (p0, z0, 32))
+
+/*
+** lsl_32_s32_x_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_s32_x_untied, svint32_t,
+ z0 = svlsl_n_s32_x (p0, z1, 32),
+ z0 = svlsl_x (p0, z1, 32))
new file mode 100644
@@ -0,0 +1,507 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_s64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_m_tied1, svint64_t,
+ z0 = svlsl_s64_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_s64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_m_untied, svint64_t,
+ z0 = svlsl_s64_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_s64_m_tied2, svint64_t,
+ z1 = svlsl_s64_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_s64_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_m_tied1, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_s64_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_m_untied, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_s64_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_m_tied1, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_s64_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_m_untied, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_s64_m_tied1:
+** sel z0\.d, p0, z0\.d, z0\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s64_m_tied1, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_s64_m_untied:
+** sel z0\.d, p0, z1\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s64_m_untied, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_s64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_m_tied1, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_s64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_m_untied, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_s64_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_m_tied1, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_s64_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_m_untied, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_63_s64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_m_tied1, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, 63),
+ z0 = svlsl_m (p0, z0, 63))
+
+/*
+** lsl_63_s64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_m_untied, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, 63),
+ z0 = svlsl_m (p0, z1, 63))
+
+/*
+** lsl_64_s64_m_tied1:
+** mov (z[0-9]+\.d), #64
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_m_tied1, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z0, 64),
+ z0 = svlsl_m (p0, z0, 64))
+
+/*
+** lsl_64_s64_m_untied:
+** mov (z[0-9]+\.d), #64
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_m_untied, svint64_t,
+ z0 = svlsl_n_s64_m (p0, z1, 64),
+ z0 = svlsl_m (p0, z1, 64))
+
+/*
+** lsl_s64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_z_tied1, svint64_t,
+ z0 = svlsl_s64_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_s64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_z_untied, svint64_t,
+ z0 = svlsl_s64_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_s64_z_tied2:
+** movprfx z0\.d, p0/z, z0\.d
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_z_tied2, svint64_t,
+ z0 = svlsl_s64_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_s64_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_z_tied1, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_s64_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_z_untied, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_s64_z_tied1:
+** mov (z[0-9]+\.d), d0
+** movprfx z1\.d, p0/z, z1\.d
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_z_tied1, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_s64_z_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1\.d, p0/z, z2\.d
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_z_untied, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_s64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s64_z_tied1, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_s64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s64_z_untied, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_s64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_z_tied1, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_s64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_z_untied, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_s64_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_z_tied1, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_s64_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_z_untied, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_63_s64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_z_tied1, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, 63),
+ z0 = svlsl_z (p0, z0, 63))
+
+/*
+** lsl_63_s64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_z_untied, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, 63),
+ z0 = svlsl_z (p0, z1, 63))
+
+/*
+** lsl_64_s64_z_tied1:
+** mov (z[0-9]+\.d), #64
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_z_tied1, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z0, 64),
+ z0 = svlsl_z (p0, z0, 64))
+
+/*
+** lsl_64_s64_z_untied:
+** mov (z[0-9]+\.d), #64
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_z_untied, svint64_t,
+ z0 = svlsl_n_s64_z (p0, z1, 64),
+ z0 = svlsl_z (p0, z1, 64))
+
+/*
+** lsl_s64_x_tied1:
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_x_tied1, svint64_t,
+ z0 = svlsl_s64_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_s64_x_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_x_untied, svint64_t,
+		z0 = svlsl_s64_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_s64_x_tied2:
+** lslr z1\.d, p0/m, z1\.d, z0\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s64_x_tied2, svint64_t,
+ z1 = svlsl_s64_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_s64_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_x_tied1, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_s64_x_untied:
+** mov z0\.d, x0
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s64_x_untied, svint64_t, uint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_s64_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_x_tied1, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_s64_x_untied:
+** mov z1\.d, d0
+** lslr z1\.d, p0/m, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s64_x_untied, svint64_t, uint64_t,
+ z1 = svlsl_n_s64_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_s64_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s64_x_tied1, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_s64_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s64_x_untied, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_s64_x_tied1:
+** lsl z0\.d, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_x_tied1, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_s64_x_untied:
+** lsl z0\.d, z1\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s64_x_untied, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_s64_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_x_tied1, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_s64_x_untied:
+** mov z0\.b, #-1
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s64_x_untied, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_63_s64_x_tied1:
+** lsl z0\.d, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_x_tied1, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, 63),
+ z0 = svlsl_x (p0, z0, 63))
+
+/*
+** lsl_63_s64_x_untied:
+** lsl z0\.d, z1\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_s64_x_untied, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, 63),
+ z0 = svlsl_x (p0, z1, 63))
+
+/*
+** lsl_64_s64_x_tied1:
+** mov (z[0-9]+\.d), #64
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_x_tied1, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z0, 64),
+ z0 = svlsl_x (p0, z0, 64))
+
+/*
+** lsl_64_s64_x_untied:
+** mov z0\.d, #64
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_s64_x_untied, svint64_t,
+ z0 = svlsl_n_s64_x (p0, z1, 64),
+ z0 = svlsl_x (p0, z1, 64))
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_s8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_m_tied1, svint8_t,
+ z0 = svlsl_s8_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_s8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_m_untied, svint8_t,
+ z0 = svlsl_s8_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_s8_m_tied2, svint8_t,
+ z1 = svlsl_s8_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_s8_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_m_tied1, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_s8_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_m_untied, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_s8_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_m_tied1, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_s8_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_m_untied, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_s8_m_tied1:
+** sel z0\.b, p0, z0\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s8_m_tied1, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_s8_m_untied:
+** sel z0\.b, p0, z1\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_s8_m_untied, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_s8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_m_tied1, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_s8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_m_untied, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_s8_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_m_tied1, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_s8_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_m_untied, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_7_s8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_m_tied1, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z0, 7),
+ z0 = svlsl_m (p0, z0, 7))
+
+/*
+** lsl_7_s8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_m_untied, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z1, 7),
+ z0 = svlsl_m (p0, z1, 7))
+
+/*
+** lsl_8_s8_m_tied1:
+** mov (z[0-9]+\.d), #8
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_m_tied1, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z0, 8),
+ z0 = svlsl_m (p0, z0, 8))
+
+/*
+** lsl_8_s8_m_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_m_untied, svint8_t,
+ z0 = svlsl_n_s8_m (p0, z1, 8),
+ z0 = svlsl_m (p0, z1, 8))
+
+/*
+** lsl_s8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_z_tied1, svint8_t,
+ z0 = svlsl_s8_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_s8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_z_untied, svint8_t,
+ z0 = svlsl_s8_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_s8_z_tied2:
+** movprfx z0\.b, p0/z, z0\.b
+** lslr z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_z_tied2, svint8_t,
+ z0 = svlsl_s8_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_s8_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_z_tied1, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_s8_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_z_untied, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_s8_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.b, p0/z, z1\.b
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_z_tied1, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_s8_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.b, p0/z, z2\.b
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_z_untied, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_s8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s8_z_tied1, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_s8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s8_z_untied, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_s8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_z_tied1, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_s8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_z_untied, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_s8_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.b, p0/z, z0\.b
+**	lsl	z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_z_tied1, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_s8_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.b, p0/z, z1\.b
+**	lsl	z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_z_untied, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_7_s8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_z_tied1, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z0, 7),
+ z0 = svlsl_z (p0, z0, 7))
+
+/*
+** lsl_7_s8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_z_untied, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z1, 7),
+ z0 = svlsl_z (p0, z1, 7))
+
+/*
+** lsl_8_s8_z_tied1:
+** mov (z[0-9]+\.d), #8
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_z_tied1, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z0, 8),
+ z0 = svlsl_z (p0, z0, 8))
+
+/*
+** lsl_8_s8_z_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_z_untied, svint8_t,
+ z0 = svlsl_n_s8_z (p0, z1, 8),
+ z0 = svlsl_z (p0, z1, 8))
+
+/*
+** lsl_s8_x_tied1:
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_x_tied1, svint8_t,
+ z0 = svlsl_s8_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_s8_x_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_x_untied, svint8_t,
+		z0 = svlsl_s8_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_s8_x_tied2:
+** lslr z1\.b, p0/m, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_s8_x_tied2, svint8_t,
+ z1 = svlsl_s8_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_s8_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_x_tied1, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_s8_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_s8_x_untied, svint8_t, uint64_t,
+ z0 = svlsl_n_s8_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_s8_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_x_tied1, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_s8_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_s8_x_untied, svint8_t, uint64_t,
+ z1 = svlsl_n_s8_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_s8_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s8_x_tied1, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_s8_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_s8_x_untied, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_s8_x_tied1:
+** lsl z0\.b, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_x_tied1, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_s8_x_untied:
+** lsl z0\.b, z1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_s8_x_untied, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_s8_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_x_tied1, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_s8_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_s8_x_untied, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_7_s8_x_tied1:
+** lsl z0\.b, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_x_tied1, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z0, 7),
+ z0 = svlsl_x (p0, z0, 7))
+
+/*
+** lsl_7_s8_x_untied:
+** lsl z0\.b, z1\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_s8_x_untied, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z1, 7),
+ z0 = svlsl_x (p0, z1, 7))
+
+/*
+** lsl_8_s8_x_tied1:
+** mov (z[0-9]+\.d), #8
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_x_tied1, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z0, 8),
+ z0 = svlsl_x (p0, z0, 8))
+
+/*
+** lsl_8_s8_x_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_s8_x_untied, svint8_t,
+ z0 = svlsl_n_s8_x (p0, z1, 8),
+ z0 = svlsl_x (p0, z1, 8))
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_u16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_m_tied1, svuint16_t,
+ z0 = svlsl_u16_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_u16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_m_untied, svuint16_t,
+ z0 = svlsl_u16_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_u16_m_tied2, svuint16_t,
+ z1 = svlsl_u16_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_u16_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_m_tied1, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_u16_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_m_untied, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_u16_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_m_tied1, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_u16_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_m_untied, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_u16_m_tied1:
+** sel z0\.h, p0, z0\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_u16_m_tied1, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_u16_m_untied:
+** sel z0\.h, p0, z1\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_0_u16_m_untied, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_u16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_m_tied1, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_u16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_m_untied, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_u16_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_m_tied1, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_u16_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_m_untied, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_15_u16_m_tied1:
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_m_tied1, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z0, 15),
+ z0 = svlsl_m (p0, z0, 15))
+
+/*
+** lsl_15_u16_m_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_m_untied, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z1, 15),
+ z0 = svlsl_m (p0, z1, 15))
+
+/*
+** lsl_16_u16_m_tied1:
+** mov (z[0-9]+\.d), #16
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_m_tied1, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z0, 16),
+ z0 = svlsl_m (p0, z0, 16))
+
+/*
+** lsl_16_u16_m_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_m_untied, svuint16_t,
+ z0 = svlsl_n_u16_m (p0, z1, 16),
+ z0 = svlsl_m (p0, z1, 16))
+
+/*
+** lsl_u16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_z_tied1, svuint16_t,
+ z0 = svlsl_u16_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_u16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_z_untied, svuint16_t,
+ z0 = svlsl_u16_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_u16_z_tied2:
+** movprfx z0\.h, p0/z, z0\.h
+** lslr z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_z_tied2, svuint16_t,
+ z0 = svlsl_u16_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_u16_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_z_tied1, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_u16_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_z_untied, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_u16_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.h, p0/z, z1\.h
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_z_tied1, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_u16_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.h, p0/z, z2\.h
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_z_untied, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_u16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u16_z_tied1, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_u16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u16_z_untied, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_u16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_z_tied1, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_u16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_z_untied, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_u16_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_z_tied1, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_u16_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_z_untied, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_15_u16_z_tied1:
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_z_tied1, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z0, 15),
+ z0 = svlsl_z (p0, z0, 15))
+
+/*
+** lsl_15_u16_z_untied:
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_z_untied, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z1, 15),
+ z0 = svlsl_z (p0, z1, 15))
+
+/*
+** lsl_16_u16_z_tied1:
+** mov (z[0-9]+\.d), #16
+** movprfx z0\.h, p0/z, z0\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_z_tied1, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z0, 16),
+ z0 = svlsl_z (p0, z0, 16))
+
+/*
+** lsl_16_u16_z_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0\.h, p0/z, z1\.h
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_z_untied, svuint16_t,
+ z0 = svlsl_n_u16_z (p0, z1, 16),
+ z0 = svlsl_z (p0, z1, 16))
+
+/*
+** lsl_u16_x_tied1:
+** lsl z0\.h, p0/m, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_x_tied1, svuint16_t,
+ z0 = svlsl_u16_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_u16_x_untied:
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_x_untied, svuint16_t,
+		z0 = svlsl_u16_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_u16_x_tied2:
+** lslr z1\.h, p0/m, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u16_x_tied2, svuint16_t,
+ z1 = svlsl_u16_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_u16_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_x_tied1, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_u16_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u16_x_untied, svuint16_t, uint64_t,
+ z0 = svlsl_n_u16_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_u16_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_x_tied1, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_u16_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.h, p0/m, z1\.h, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u16_x_untied, svuint16_t, uint64_t,
+ z1 = svlsl_n_u16_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_u16_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u16_x_tied1, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_u16_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u16_x_untied, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_u16_x_tied1:
+** lsl z0\.h, z0\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_x_tied1, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_u16_x_untied:
+** lsl z0\.h, z1\.h, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u16_x_untied, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_u16_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_x_tied1, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_u16_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u16_x_untied, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_15_u16_x_tied1:
+** lsl z0\.h, z0\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_x_tied1, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z0, 15),
+ z0 = svlsl_x (p0, z0, 15))
+
+/*
+** lsl_15_u16_x_untied:
+** lsl z0\.h, z1\.h, #15
+** ret
+*/
+TEST_UNIFORM_Z (lsl_15_u16_x_untied, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z1, 15),
+ z0 = svlsl_x (p0, z1, 15))
+
+/*
+** lsl_16_u16_x_tied1:
+** mov (z[0-9]+\.d), #16
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_x_tied1, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z0, 16),
+ z0 = svlsl_x (p0, z0, 16))
+
+/*
+** lsl_16_u16_x_untied:
+** mov (z[0-9]+\.d), #16
+** movprfx z0, z1
+** lsl z0\.h, p0/m, z0\.h, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_16_u16_x_untied, svuint16_t,
+ z0 = svlsl_n_u16_x (p0, z1, 16),
+ z0 = svlsl_x (p0, z1, 16))
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_u32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_m_tied1, svuint32_t,
+ z0 = svlsl_u32_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_u32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_m_untied, svuint32_t,
+ z0 = svlsl_u32_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_u32_m_tied2, svuint32_t,
+ z1 = svlsl_u32_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_u32_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_m_tied1, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_u32_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_m_untied, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_u32_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_m_tied1, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_u32_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_m_untied, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_u32_m_tied1:
+** sel z0\.s, p0, z0\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_m_tied1, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_u32_m_untied:
+** sel z0\.s, p0, z1\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_m_untied, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_u32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_m_tied1, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_u32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_m_untied, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_u32_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_m_tied1, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_u32_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_m_untied, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_31_u32_m_tied1:
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_m_tied1, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z0, 31),
+ z0 = svlsl_m (p0, z0, 31))
+
+/*
+** lsl_31_u32_m_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_m_untied, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z1, 31),
+ z0 = svlsl_m (p0, z1, 31))
+
+/*
+** lsl_32_u32_m_tied1:
+** mov (z[0-9]+\.d), #32
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_m_tied1, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z0, 32),
+ z0 = svlsl_m (p0, z0, 32))
+
+/*
+** lsl_32_u32_m_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_m_untied, svuint32_t,
+ z0 = svlsl_n_u32_m (p0, z1, 32),
+ z0 = svlsl_m (p0, z1, 32))
+
+/*
+** lsl_u32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_z_tied1, svuint32_t,
+ z0 = svlsl_u32_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_u32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_z_untied, svuint32_t,
+ z0 = svlsl_u32_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_u32_z_tied2:
+** movprfx z0\.s, p0/z, z0\.s
+** lslr z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_z_tied2, svuint32_t,
+ z0 = svlsl_u32_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_u32_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_z_tied1, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_u32_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_z_untied, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_u32_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.s, p0/z, z1\.s
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_z_tied1, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_u32_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.s, p0/z, z2\.s
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_z_untied, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_u32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_z_tied1, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_u32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_z_untied, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_u32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_z_tied1, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_u32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_z_untied, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_u32_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_z_tied1, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_u32_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_z_untied, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_31_u32_z_tied1:
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_z_tied1, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z0, 31),
+ z0 = svlsl_z (p0, z0, 31))
+
+/*
+** lsl_31_u32_z_untied:
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_z_untied, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z1, 31),
+ z0 = svlsl_z (p0, z1, 31))
+
+/*
+** lsl_32_u32_z_tied1:
+** mov (z[0-9]+\.d), #32
+** movprfx z0\.s, p0/z, z0\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_z_tied1, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z0, 32),
+ z0 = svlsl_z (p0, z0, 32))
+
+/*
+** lsl_32_u32_z_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0\.s, p0/z, z1\.s
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_z_untied, svuint32_t,
+ z0 = svlsl_n_u32_z (p0, z1, 32),
+ z0 = svlsl_z (p0, z1, 32))
+
+/*
+** lsl_u32_x_tied1:
+** lsl z0\.s, p0/m, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_x_tied1, svuint32_t,
+ z0 = svlsl_u32_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_u32_x_untied:
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_x_untied, svuint32_t,
+		z0 = svlsl_u32_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_u32_x_tied2:
+** lslr z1\.s, p0/m, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u32_x_tied2, svuint32_t,
+ z1 = svlsl_u32_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_u32_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_x_tied1, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_u32_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u32_x_untied, svuint32_t, uint64_t,
+ z0 = svlsl_n_u32_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_u32_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_x_tied1, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_u32_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.s, p0/m, z1\.s, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u32_x_untied, svuint32_t, uint64_t,
+ z1 = svlsl_n_u32_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_u32_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_x_tied1, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_u32_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u32_x_untied, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_u32_x_tied1:
+** lsl z0\.s, z0\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_x_tied1, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_u32_x_untied:
+** lsl z0\.s, z1\.s, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u32_x_untied, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_u32_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_x_tied1, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_u32_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u32_x_untied, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_31_u32_x_tied1:
+** lsl z0\.s, z0\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_x_tied1, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z0, 31),
+ z0 = svlsl_x (p0, z0, 31))
+
+/*
+** lsl_31_u32_x_untied:
+** lsl z0\.s, z1\.s, #31
+** ret
+*/
+TEST_UNIFORM_Z (lsl_31_u32_x_untied, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z1, 31),
+ z0 = svlsl_x (p0, z1, 31))
+
+/*
+** lsl_32_u32_x_tied1:
+** mov (z[0-9]+\.d), #32
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_x_tied1, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z0, 32),
+ z0 = svlsl_x (p0, z0, 32))
+
+/*
+** lsl_32_u32_x_untied:
+** mov (z[0-9]+\.d), #32
+** movprfx z0, z1
+** lsl z0\.s, p0/m, z0\.s, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_32_u32_x_untied, svuint32_t,
+ z0 = svlsl_n_u32_x (p0, z1, 32),
+ z0 = svlsl_x (p0, z1, 32))
new file mode 100644
@@ -0,0 +1,507 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_u64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_m_tied1, svuint64_t,
+ z0 = svlsl_u64_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_u64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_m_untied, svuint64_t,
+ z0 = svlsl_u64_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_u64_m_tied2, svuint64_t,
+ z1 = svlsl_u64_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_u64_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_m_tied1, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_u64_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_m_untied, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_u64_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_m_tied1, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_u64_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_m_untied, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_u64_m_tied1:
+** sel z0\.d, p0, z0\.d, z0\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_m_tied1, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_u64_m_untied:
+** sel z0\.d, p0, z1\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_m_untied, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_u64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_m_tied1, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_u64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_m_untied, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_u64_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_m_tied1, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_u64_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_m_untied, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_63_u64_m_tied1:
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_m_tied1, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, 63),
+ z0 = svlsl_m (p0, z0, 63))
+
+/*
+** lsl_63_u64_m_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_m_untied, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, 63),
+ z0 = svlsl_m (p0, z1, 63))
+
+/*
+** lsl_64_u64_m_tied1:
+** mov (z[0-9]+\.d), #64
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_m_tied1, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z0, 64),
+ z0 = svlsl_m (p0, z0, 64))
+
+/*
+** lsl_64_u64_m_untied:
+** mov (z[0-9]+\.d), #64
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_m_untied, svuint64_t,
+ z0 = svlsl_n_u64_m (p0, z1, 64),
+ z0 = svlsl_m (p0, z1, 64))
+
+/*
+** lsl_u64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_z_tied1, svuint64_t,
+ z0 = svlsl_u64_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_u64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_z_untied, svuint64_t,
+ z0 = svlsl_u64_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_u64_z_tied2:
+** movprfx z0\.d, p0/z, z0\.d
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_z_tied2, svuint64_t,
+ z0 = svlsl_u64_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_u64_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_z_tied1, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_u64_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_z_untied, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_u64_z_tied1:
+** mov (z[0-9]+\.d), d0
+** movprfx z1\.d, p0/z, z1\.d
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_z_tied1, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_u64_z_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1\.d, p0/z, z2\.d
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_z_untied, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_u64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_z_tied1, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_u64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_z_untied, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_u64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_z_tied1, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_u64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_z_untied, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_u64_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_z_tied1, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_u64_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_z_untied, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_63_u64_z_tied1:
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_z_tied1, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, 63),
+ z0 = svlsl_z (p0, z0, 63))
+
+/*
+** lsl_63_u64_z_untied:
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_z_untied, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, 63),
+ z0 = svlsl_z (p0, z1, 63))
+
+/*
+** lsl_64_u64_z_tied1:
+** mov (z[0-9]+\.d), #64
+** movprfx z0\.d, p0/z, z0\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_z_tied1, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z0, 64),
+ z0 = svlsl_z (p0, z0, 64))
+
+/*
+** lsl_64_u64_z_untied:
+** mov (z[0-9]+\.d), #64
+** movprfx z0\.d, p0/z, z1\.d
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_z_untied, svuint64_t,
+ z0 = svlsl_n_u64_z (p0, z1, 64),
+ z0 = svlsl_z (p0, z1, 64))
+
+/*
+** lsl_u64_x_tied1:
+** lsl z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_x_tied1, svuint64_t,
+ z0 = svlsl_u64_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_u64_x_untied:
+** movprfx z0, z1
+** lsl z0\.d, p0/m, z0\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_x_untied, svuint64_t,
+		z0 = svlsl_u64_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_u64_x_tied2:
+** lslr z1\.d, p0/m, z1\.d, z0\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u64_x_tied2, svuint64_t,
+ z1 = svlsl_u64_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_u64_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_x_tied1, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_u64_x_untied:
+** mov z0\.d, x0
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u64_x_untied, svuint64_t, uint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_u64_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.d, p0/m, z1\.d, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_x_tied1, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_u64_x_untied:
+** mov z1\.d, d0
+** lslr z1\.d, p0/m, z1\.d, z2\.d
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u64_x_untied, svuint64_t, uint64_t,
+ z1 = svlsl_n_u64_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_u64_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_x_tied1, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_u64_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u64_x_untied, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_u64_x_tied1:
+** lsl z0\.d, z0\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_x_tied1, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_u64_x_untied:
+** lsl z0\.d, z1\.d, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u64_x_untied, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_u64_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.d, p0/m, z0\.d, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_x_tied1, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_u64_x_untied:
+** mov z0\.b, #-1
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u64_x_untied, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_63_u64_x_tied1:
+** lsl z0\.d, z0\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_x_tied1, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, 63),
+ z0 = svlsl_x (p0, z0, 63))
+
+/*
+** lsl_63_u64_x_untied:
+** lsl z0\.d, z1\.d, #63
+** ret
+*/
+TEST_UNIFORM_Z (lsl_63_u64_x_untied, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, 63),
+ z0 = svlsl_x (p0, z1, 63))
+
+/*
+** lsl_64_u64_x_tied1:
+** mov (z[0-9]+\.d), #64
+** lsl z0\.d, p0/m, z0\.d, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_x_tied1, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z0, 64),
+ z0 = svlsl_x (p0, z0, 64))
+
+/*
+** lsl_64_u64_x_untied:
+** mov z0\.d, #64
+** lslr z0\.d, p0/m, z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_64_u64_x_untied, svuint64_t,
+ z0 = svlsl_n_u64_x (p0, z1, 64),
+ z0 = svlsl_x (p0, z1, 64))
new file mode 100644
@@ -0,0 +1,511 @@
+/* { dg-do compile } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+/*
+** lsl_u8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_m_tied1, svuint8_t,
+ z0 = svlsl_u8_m (p0, z0, z1),
+ z0 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_u8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_m_untied, svuint8_t,
+ z0 = svlsl_u8_m (p0, z1, z2),
+ z0 = svlsl_m (p0, z1, z2))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_UNIFORM_Z (lsl_u8_m_tied2, svuint8_t,
+ z1 = svlsl_u8_m (p0, z0, z1),
+ z1 = svlsl_m (p0, z0, z1))
+
+/*
+** lsl_w0_u8_m_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_m_tied1, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_m (p0, z0, x0),
+ z0 = svlsl_m (p0, z0, x0))
+
+/*
+** lsl_w0_u8_m_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_m_untied, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_m (p0, z1, x0),
+ z0 = svlsl_m (p0, z1, x0))
+
+/*
+** lsl_d0_u8_m_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_m_tied1, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_m (p0, z1, d0),
+ z1 = svlsl_m (p0, z1, d0))
+
+/*
+** lsl_d0_u8_m_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_m_untied, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_m (p0, z2, d0),
+ z1 = svlsl_m (p0, z2, d0))
+
+/*
+** lsl_0_u8_m_tied1:
+** sel z0\.b, p0, z0\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_m_tied1, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z0, 0),
+ z0 = svlsl_m (p0, z0, 0))
+
+/*
+** lsl_0_u8_m_untied:
+** sel z0\.b, p0, z1\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_m_untied, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z1, 0),
+ z0 = svlsl_m (p0, z1, 0))
+
+/*
+** lsl_1_u8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_m_tied1, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z0, 1),
+ z0 = svlsl_m (p0, z0, 1))
+
+/*
+** lsl_1_u8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_m_untied, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z1, 1),
+ z0 = svlsl_m (p0, z1, 1))
+
+/*
+** lsl_m1_u8_m_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_m_tied1, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z0, -1),
+ z0 = svlsl_m (p0, z0, -1))
+
+/*
+** lsl_m1_u8_m_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_m_untied, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z1, -1),
+ z0 = svlsl_m (p0, z1, -1))
+
+/*
+** lsl_7_u8_m_tied1:
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_m_tied1, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z0, 7),
+ z0 = svlsl_m (p0, z0, 7))
+
+/*
+** lsl_7_u8_m_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_m_untied, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z1, 7),
+ z0 = svlsl_m (p0, z1, 7))
+
+/*
+** lsl_8_u8_m_tied1:
+** mov (z[0-9]+\.d), #8
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_m_tied1, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z0, 8),
+ z0 = svlsl_m (p0, z0, 8))
+
+/*
+** lsl_8_u8_m_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_m_untied, svuint8_t,
+ z0 = svlsl_n_u8_m (p0, z1, 8),
+ z0 = svlsl_m (p0, z1, 8))
+
+/*
+** lsl_u8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_z_tied1, svuint8_t,
+ z0 = svlsl_u8_z (p0, z0, z1),
+ z0 = svlsl_z (p0, z0, z1))
+
+/*
+** lsl_u8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_z_untied, svuint8_t,
+ z0 = svlsl_u8_z (p0, z1, z2),
+ z0 = svlsl_z (p0, z1, z2))
+
+/*
+** lsl_u8_z_tied2:
+** movprfx z0\.b, p0/z, z0\.b
+** lslr z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_z_tied2, svuint8_t,
+ z0 = svlsl_u8_z (p0, z1, z0),
+ z0 = svlsl_z (p0, z1, z0))
+
+/*
+** lsl_w0_u8_z_tied1:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_z_tied1, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_z (p0, z0, x0),
+ z0 = svlsl_z (p0, z0, x0))
+
+/*
+** lsl_w0_u8_z_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_z_untied, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_z (p0, z1, x0),
+ z0 = svlsl_z (p0, z1, x0))
+
+/*
+** lsl_d0_u8_z_tied1:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.b, p0/z, z1\.b
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_z_tied1, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_z (p0, z1, d0),
+ z1 = svlsl_z (p0, z1, d0))
+
+/*
+** lsl_d0_u8_z_untied:
+**	mov	(z[0-9]+\.d), d0
+** movprfx z1\.b, p0/z, z2\.b
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_z_untied, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_z (p0, z2, d0),
+ z1 = svlsl_z (p0, z2, d0))
+
+/*
+** lsl_0_u8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_z_tied1, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z0, 0),
+ z0 = svlsl_z (p0, z0, 0))
+
+/*
+** lsl_0_u8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #0
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_z_untied, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z1, 0),
+ z0 = svlsl_z (p0, z1, 0))
+
+/*
+** lsl_1_u8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_z_tied1, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z0, 1),
+ z0 = svlsl_z (p0, z0, 1))
+
+/*
+** lsl_1_u8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_z_untied, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z1, 1),
+ z0 = svlsl_z (p0, z1, 1))
+
+/*
+** lsl_m1_u8_z_tied1:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.b, p0/z, z0\.b
+**	lsl	z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_z_tied1, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z0, -1),
+ z0 = svlsl_z (p0, z0, -1))
+
+/*
+** lsl_m1_u8_z_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0\.b, p0/z, z1\.b
+**	lsl	z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_z_untied, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z1, -1),
+ z0 = svlsl_z (p0, z1, -1))
+
+/*
+** lsl_7_u8_z_tied1:
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_z_tied1, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z0, 7),
+ z0 = svlsl_z (p0, z0, 7))
+
+/*
+** lsl_7_u8_z_untied:
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_z_untied, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z1, 7),
+ z0 = svlsl_z (p0, z1, 7))
+
+/*
+** lsl_8_u8_z_tied1:
+** mov (z[0-9]+\.d), #8
+** movprfx z0\.b, p0/z, z0\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_z_tied1, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z0, 8),
+ z0 = svlsl_z (p0, z0, 8))
+
+/*
+** lsl_8_u8_z_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0\.b, p0/z, z1\.b
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_z_untied, svuint8_t,
+ z0 = svlsl_n_u8_z (p0, z1, 8),
+ z0 = svlsl_z (p0, z1, 8))
+
+/*
+** lsl_u8_x_tied1:
+** lsl z0\.b, p0/m, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_x_tied1, svuint8_t,
+ z0 = svlsl_u8_x (p0, z0, z1),
+ z0 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_u8_x_untied:
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_x_untied, svuint8_t,
+		z0 = svlsl_u8_x (p0, z1, z2),
+		z0 = svlsl_x (p0, z1, z2))
+
+/*
+** lsl_u8_x_tied2:
+** lslr z1\.b, p0/m, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (lsl_u8_x_tied2, svuint8_t,
+ z1 = svlsl_u8_x (p0, z0, z1),
+ z1 = svlsl_x (p0, z0, z1))
+
+/*
+** lsl_w0_u8_x_tied1:
+** mov (z[0-9]+\.d), x0
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_x_tied1, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_x (p0, z0, x0),
+ z0 = svlsl_x (p0, z0, x0))
+
+/*
+** lsl_w0_u8_x_untied:
+** mov (z[0-9]+\.d), x0
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_w0_u8_x_untied, svuint8_t, uint64_t,
+ z0 = svlsl_n_u8_x (p0, z1, x0),
+ z0 = svlsl_x (p0, z1, x0))
+
+/*
+** lsl_d0_u8_x_tied1:
+** mov (z[0-9]+\.d), d0
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_x_tied1, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_x (p0, z1, d0),
+ z1 = svlsl_x (p0, z1, d0))
+
+/*
+** lsl_d0_u8_x_untied:
+** mov (z[0-9]+\.d), d0
+** movprfx z1, z2
+** lsl z1\.b, p0/m, z1\.b, \1
+** ret
+*/
+TEST_UNIFORM_ZS (lsl_d0_u8_x_untied, svuint8_t, uint64_t,
+ z1 = svlsl_n_u8_x (p0, z2, d0),
+ z1 = svlsl_x (p0, z2, d0))
+
+/*
+** lsl_0_u8_x_tied1:
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_x_tied1, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z0, 0),
+ z0 = svlsl_x (p0, z0, 0))
+
+/*
+** lsl_0_u8_x_untied:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_0_u8_x_untied, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z1, 0),
+ z0 = svlsl_x (p0, z1, 0))
+
+/*
+** lsl_1_u8_x_tied1:
+** lsl z0\.b, z0\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_x_tied1, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z0, 1),
+ z0 = svlsl_x (p0, z0, 1))
+
+/*
+** lsl_1_u8_x_untied:
+** lsl z0\.b, z1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_1_u8_x_untied, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z1, 1),
+ z0 = svlsl_x (p0, z1, 1))
+
+/*
+** lsl_m1_u8_x_tied1:
+** mov (z[0-9]+)\.b, #-1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_x_tied1, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z0, -1),
+ z0 = svlsl_x (p0, z0, -1))
+
+/*
+** lsl_m1_u8_x_untied:
+** mov (z[0-9]+)\.b, #-1
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1\.d
+** ret
+*/
+TEST_UNIFORM_Z (lsl_m1_u8_x_untied, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z1, -1),
+ z0 = svlsl_x (p0, z1, -1))
+
+/*
+** lsl_7_u8_x_tied1:
+** lsl z0\.b, z0\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_x_tied1, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z0, 7),
+ z0 = svlsl_x (p0, z0, 7))
+
+/*
+** lsl_7_u8_x_untied:
+** lsl z0\.b, z1\.b, #7
+** ret
+*/
+TEST_UNIFORM_Z (lsl_7_u8_x_untied, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z1, 7),
+ z0 = svlsl_x (p0, z1, 7))
+
+/*
+** lsl_8_u8_x_tied1:
+** mov (z[0-9]+\.d), #8
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_x_tied1, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z0, 8),
+ z0 = svlsl_x (p0, z0, 8))
+
+/*
+** lsl_8_u8_x_untied:
+** mov (z[0-9]+\.d), #8
+** movprfx z0, z1
+** lsl z0\.b, p0/m, z0\.b, \1
+** ret
+*/
+TEST_UNIFORM_Z (lsl_8_u8_x_untied, svuint8_t,
+ z0 = svlsl_n_u8_x (p0, z1, 8),
+ z0 = svlsl_x (p0, z1, 8))