Use generic routines for 8-bit carry-less multiply.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/ppc/int_helper.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -25,6 +25,7 @@
#include "exec/helper-proto.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
+#include "crypto/clmul.h"
#include "fpu/softfloat.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
@@ -1424,6 +1425,18 @@ void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
#undef VBPERMQ_INDEX
#undef VBPERMQ_DW

+/*
+ * There is no carry across the two doublewords, so their order does
+ * not matter.  Nor is there partial overlap between registers.
+ */
+void helper_vpmsumb(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+    for (int i = 0; i < 2; ++i) {
+        uint64_t aa = a->u64[i], bb = b->u64[i];
+        r->u64[i] = clmul_8x4_even(aa, bb) ^ clmul_8x4_odd(aa, bb);
+    }
+}
+
#define PMSUM(name, srcfld, trgfld, trgtyp) \
void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
@@ -1444,7 +1457,6 @@ void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
} \
}
-PMSUM(vpmsumb, u8, u16, uint16_t)
PMSUM(vpmsumh, u16, u32, uint32_t)
PMSUM(vpmsumw, u32, u64, uint64_t)
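For readers unfamiliar with the crypto/clmul.h routines, here is a
minimal reference sketch of the semantics helper_vpmsumb relies on:
each call performs four 8x8->16 carry-less multiplies, on the even
(low) or odd (high) byte of each 16-bit lane of the inputs. The
clmul_8x8_ref and ref_* names below are illustrative only, not QEMU
API.

#include <stdint.h>

/* One 8x8->16 carry-less multiply: partial products combine with XOR. */
static uint16_t clmul_8x8_ref(uint8_t a, uint8_t b)
{
    uint16_t r = 0;
    for (int i = 0; i < 8; i++) {
        if (a & (1u << i)) {
            r ^= (uint16_t)b << i;
        }
    }
    return r;
}

/* Multiply the even (low) bytes of each 16-bit lane of n and m. */
static uint64_t ref_clmul_8x4_even(uint64_t n, uint64_t m)
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i += 16) {
        /* The uint8_t casts select the low byte of the current lane. */
        uint16_t p = clmul_8x8_ref((uint8_t)(n >> i), (uint8_t)(m >> i));
        r |= (uint64_t)p << i;
    }
    return r;
}

/* Multiply the odd (high) bytes; products land in the same lanes. */
static uint64_t ref_clmul_8x4_odd(uint64_t n, uint64_t m)
{
    return ref_clmul_8x4_even(n >> 8, m >> 8);
}

XORing the even and odd results, as helper_vpmsumb does for each
doubleword, makes each 16-bit lane of the result the XOR of the
carry-less products of its two byte pairs, which is what the removed
bit-serial PMSUM(vpmsumb, u8, u16, uint16_t) instantiation computed.
Note the XOR accumulation: for example clmul_8x8_ref(3, 3) is 5, not 9.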