@@ -21,13 +21,6 @@ static bool s390_vec_is_zero(const S390Vector *v)
return !v->doubleword[0] && !v->doubleword[1];
}
-static void s390_vec_xor(S390Vector *res, const S390Vector *a,
- const S390Vector *b)
-{
- res->doubleword[0] = a->doubleword[0] ^ b->doubleword[0];
- res->doubleword[1] = a->doubleword[1] ^ b->doubleword[1];
-}
-
static void s390_vec_and(S390Vector *res, const S390Vector *a,
const S390Vector *b)
{
@@ -166,26 +159,6 @@ DEF_VCTZ(16)
/* like binary multiplication, but XOR instead of addition */
-static S390Vector galois_multiply64(uint64_t a, uint64_t b)
-{
- S390Vector res = {};
- S390Vector va = {
- .doubleword[1] = a,
- };
- S390Vector vb = {
- .doubleword[1] = b,
- };
-
- while (!s390_vec_is_zero(&vb)) {
- if (vb.doubleword[1] & 0x1) {
- s390_vec_xor(&res, &res, &va);
- }
- s390_vec_shl(&va, &va, 1);
- s390_vec_shr(&vb, &vb, 1);
- }
- return res;
-}
-
static Int128 do_gfm8(Int128 n, Int128 m)
{
Int128 e = clmul_8x8_even(n, m);
@@ -247,35 +220,28 @@ void HELPER(gvec_vgfma32)(void *v1, const void *v2, const void *v3,
*(Int128 *)v1 = int128_xor(r, *(Int128 *)v4);
}
+static Int128 do_gfm64(Int128 n, Int128 m)
+{
+ /*
+ * The two 64-bit halves are treated identically,
+ * therefore host ordering does not matter.
+ */
+ Int128 e = clmul_64(int128_getlo(n), int128_getlo(m));
+ Int128 o = clmul_64(int128_gethi(n), int128_gethi(m));
+ return int128_xor(e, o);
+}
+
void HELPER(gvec_vgfm64)(void *v1, const void *v2, const void *v3,
uint32_t desc)
{
- S390Vector tmp1, tmp2;
- uint64_t a, b;
-
- a = s390_vec_read_element64(v2, 0);
- b = s390_vec_read_element64(v3, 0);
- tmp1 = galois_multiply64(a, b);
- a = s390_vec_read_element64(v2, 1);
- b = s390_vec_read_element64(v3, 1);
- tmp2 = galois_multiply64(a, b);
- s390_vec_xor(v1, &tmp1, &tmp2);
+ *(Int128 *)v1 = do_gfm64(*(const Int128 *)v2, *(const Int128 *)v3);
}
void HELPER(gvec_vgfma64)(void *v1, const void *v2, const void *v3,
const void *v4, uint32_t desc)
{
- S390Vector tmp1, tmp2;
- uint64_t a, b;
-
- a = s390_vec_read_element64(v2, 0);
- b = s390_vec_read_element64(v3, 0);
- tmp1 = galois_multiply64(a, b);
- a = s390_vec_read_element64(v2, 1);
- b = s390_vec_read_element64(v3, 1);
- tmp2 = galois_multiply64(a, b);
- s390_vec_xor(&tmp1, &tmp1, &tmp2);
- s390_vec_xor(v1, &tmp1, v4);
+ Int128 r = do_gfm64(*(const Int128 *)v2, *(const Int128 *)v3);
+ *(Int128 *)v1 = int128_xor(r, *(Int128 *)v4);
}
#define DEF_VMAL(BITS) \

Use the generic routine for 64-bit carry-less multiply.
Remove our local version of galois_multiply64.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/tcg/vec_int_helper.c | 62 +++++++------------------
 1 file changed, 14 insertions(+), 48 deletions(-)
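For reference, below is a standalone sketch (not part of the patch) of what the removed shift-and-xor loop and the new do_gfm64() fold compute. Carry-less multiplication is binary long multiplication with the partial products combined by XOR instead of addition, e.g. clmul(0b101, 0b011) = 0b101 ^ 0b1010 = 0b1111. The _ref names are invented for this illustration, and it assumes a GCC/Clang host with the unsigned __int128 extension rather than QEMU's Int128 wrappers.

/*
 * Standalone illustration, not QEMU code: a reference 64x64 -> 128-bit
 * carry-less multiply equivalent to the removed galois_multiply64(),
 * plus the XOR fold performed by the new do_gfm64().  Assumes a
 * GCC/Clang host providing unsigned __int128; the _ref names are
 * made up for this sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef unsigned __int128 u128;

/* Binary long multiplication, but partial products are XORed, not added. */
static u128 clmul64_ref(uint64_t a, uint64_t b)
{
    u128 res = 0;
    u128 va = a;

    while (b) {
        if (b & 1) {
            res ^= va;      /* carry-less "addition" */
        }
        va <<= 1;           /* next partial product */
        b >>= 1;
    }
    return res;
}

/* VGFM, 64-bit element size: XOR the products of the two doublewords. */
static u128 gfm64_ref(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
    return clmul64_ref(a0, b0) ^ clmul64_ref(a1, b1);
}

int main(void)
{
    /* clmul(0b101, 0b011) = 0b101 ^ 0b1010 = 0b1111 */
    u128 small = clmul64_ref(5, 3);
    u128 full = gfm64_ref(0x8000000000000001ull, 3,
                          0xffffffffffffffffull, 7);

    printf("small = %" PRIx64 "\n", (uint64_t)small);
    printf("full  = %016" PRIx64 "%016" PRIx64 "\n",
           (uint64_t)(full >> 64), (uint64_t)full);
    return 0;
}

With the private copy gone, the 64-bit VGFM/VGFMA helpers are routed through the shared clmul_64() routine, matching the smaller element sizes above, which already use the generic clmul_* helpers (e.g. clmul_8x8_even in do_gfm8).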
Use the generic routine for 64-bit carry-less multiply. Remove our local version of galois_multiply64. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- target/s390x/tcg/vec_int_helper.c | 62 +++++++------------------------ 1 file changed, 14 insertions(+), 48 deletions(-)