Message ID | 20210416235928.1631788-9-richard.henderson@linaro.org |
---|---|
State | New |
Headers | show |
Series | target/arm: Implement BFloat16 | expand |
On Sat, 17 Apr 2021 at 01:00, Richard Henderson <richard.henderson@linaro.org> wrote: > > This is BFMMLA for both AArch64 AdvSIMD and SVE, > and VMMLA.BF16 for AArch32 NEON. > > Signed-off-by: Richard Henderson <richard.henderson@linaro.org> > +void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc) > +{ > + intptr_t s, opr_sz = simd_oprsz(desc); > + float32 *d = vd, *a = va; > + uint32_t *n = vn, *m = vm; > + > + for (s = 0; s < opr_sz / 4; s += 4) { > + float32 sum00, sum01, sum10, sum11; > + > + /* > + * Process the entire segment at once, writing back the > + * results only after we've consumed all of the inputs. > + * > + * Key to indicies by column: "indices" > + * i j i k j k > + */ > + sum00 = a[s + H4(0 + 0)]; > + sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]); > + sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]); I can't make these indices match up with the arm arm pseudocode ones, which index by "4*i + 2*k + 0" and "4*i + 2*k + 1", not "2*i + k"; are we hiding a division by 2 somewhere? > + > + sum01 = a[s + H4(0 + 1)]; > + sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]); > + sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]); > + > + sum10 = a[s + H4(2 + 0)]; > + sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]); > + sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]); > + > + sum11 = a[s + H4(2 + 1)]; > + sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]); > + sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]); > + > + d[s + H4(0 + 0)] = sum00; > + d[s + H4(0 + 1)] = sum01; > + d[s + H4(2 + 0)] = sum10; > + d[s + H4(2 + 1)] = sum11; > + } > + clear_tail(d, opr_sz, simd_maxsz(desc)); Otherwise Reviewed-by: Peter Maydell <peter.maydell@linaro.org> thanks -- PMM
On 5/18/21 7:37 AM, Peter Maydell wrote: > On Sat, 17 Apr 2021 at 01:00, Richard Henderson > <richard.henderson@linaro.org> wrote: >> >> This is BFMMLA for both AArch64 AdvSIMD and SVE, >> and VMMLA.BF16 for AArch32 NEON. >> >> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> > >> +void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc) >> +{ >> + intptr_t s, opr_sz = simd_oprsz(desc); >> + float32 *d = vd, *a = va; >> + uint32_t *n = vn, *m = vm; >> + >> + for (s = 0; s < opr_sz / 4; s += 4) { >> + float32 sum00, sum01, sum10, sum11; >> + >> + /* >> + * Process the entire segment at once, writing back the >> + * results only after we've consumed all of the inputs. >> + * >> + * Key to indicies by column: > > "indices" > >> + * i j i k j k >> + */ >> + sum00 = a[s + H4(0 + 0)]; >> + sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]); >> + sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]); > > I can't make these indices match up with the arm arm pseudocode ones, > which index by "4*i + 2*k + 0" and "4*i + 2*k + 1", not "2*i + k"; > are we hiding a division by 2 somewhere? Yes. We're passing BFloat16 pairs via uint32_t[] to bfdotadd(). r~
diff --git a/target/arm/helper.h b/target/arm/helper.h index af0ee8f693..74f8bc766f 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -1008,6 +1008,9 @@ DEF_HELPER_FLAGS_5(gvec_bfdot, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(gvec_bfdot_idx, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_5(gvec_bfmmla, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, i32) + #ifdef TARGET_AARCH64 #include "helper-a64.h" #include "helper-sve.h" diff --git a/target/arm/neon-shared.decode b/target/arm/neon-shared.decode index fa3cf14e3a..4e0a25d27c 100644 --- a/target/arm/neon-shared.decode +++ b/target/arm/neon-shared.decode @@ -67,6 +67,8 @@ VUMMLA 1111 1100 0.10 .... .... 1100 .1.1 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp VUSMMLA 1111 1100 1.10 .... .... 1100 .1.0 .... \ vm=%vm_dp vn=%vn_dp vd=%vd_dp +VMMLA_b16 1111 1100 0.00 .... .... 1100 .1.0 .... \ + vm=%vm_dp vn=%vn_dp vd=%vd_dp VCMLA_scalar 1111 1110 0 . rot:2 .... .... 1000 . q:1 index:1 0 vm:4 \ vn=%vn_dp vd=%vd_dp size=1 diff --git a/target/arm/sve.decode b/target/arm/sve.decode index d5e1e5d400..aa8d5e4b8f 100644 --- a/target/arm/sve.decode +++ b/target/arm/sve.decode @@ -1519,8 +1519,10 @@ SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm ### SVE2 floating point matrix multiply accumulate - -FMMLA 01100100 .. 1 ..... 111001 ..... ..... @rda_rn_rm +{ + BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0 + FMMLA 01100100 .. 1 ..... 111 001 ..... ..... 
@rda_rn_rm +} ### SVE2 Memory Gather Load Group diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index f60afbbd06..8636eac4a8 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -12243,6 +12243,13 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) } feature = dc_isar_feature(aa64_fcma, s); break; + case 0x1d: /* BFMMLA */ + if (size != MO_16 || !is_q) { + unallocated_encoding(s); + return; + } + feature = dc_isar_feature(aa64_bf16, s); + break; case 0x1f: /* BFDOT */ switch (size) { case 1: @@ -12336,6 +12343,9 @@ static void disas_simd_three_reg_same_extra(DisasContext *s, uint32_t insn) } return; + case 0xd: /* BFMMLA */ + gen_gvec_op4_ool(s, is_q, rd, rn, rm, rd, 0, gen_helper_gvec_bfmmla); + return; case 0xf: /* BFDOT */ switch (size) { case 1: diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index ef6828c632..9ade521705 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -8610,3 +8610,15 @@ static bool trans_BFDOT_zzxz(DisasContext *s, arg_rrxr_esz *a) } return true; } + +static bool trans_BFMMLA(DisasContext *s, arg_rrrr_esz *a) +{ + if (!dc_isar_feature(aa64_sve_bf16, s)) { + return false; + } + if (sve_access_check(s)) { + gen_gvec_ool_zzzz(s, gen_helper_gvec_bfmmla, + a->rd, a->rn, a->rm, a->ra, 0); + } + return true; +} diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index 3e26fb0e5f..623a0872f3 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -2715,3 +2715,43 @@ void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm, } clear_tail(d, opr_sz, simd_maxsz(desc)); } + +void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va, uint32_t desc) +{ + intptr_t s, opr_sz = simd_oprsz(desc); + float32 *d = vd, *a = va; + uint32_t *n = vn, *m = vm; + + for (s = 0; s < opr_sz / 4; s += 4) { + float32 sum00, sum01, sum10, sum11; + + /* + * Process the entire segment at once, writing back the + * results only 
after we've consumed all of the inputs. + * + * Key to indices by column: + * i j i k j k + */ + sum00 = a[s + H4(0 + 0)]; + sum00 = bfdotadd(sum00, n[s + H4(0 + 0)], m[s + H4(0 + 0)]); + sum00 = bfdotadd(sum00, n[s + H4(0 + 1)], m[s + H4(0 + 1)]); + + sum01 = a[s + H4(0 + 1)]; + sum01 = bfdotadd(sum01, n[s + H4(0 + 0)], m[s + H4(2 + 0)]); + sum01 = bfdotadd(sum01, n[s + H4(0 + 1)], m[s + H4(2 + 1)]); + + sum10 = a[s + H4(2 + 0)]; + sum10 = bfdotadd(sum10, n[s + H4(2 + 0)], m[s + H4(0 + 0)]); + sum10 = bfdotadd(sum10, n[s + H4(2 + 1)], m[s + H4(0 + 1)]); + + sum11 = a[s + H4(2 + 1)]; + sum11 = bfdotadd(sum11, n[s + H4(2 + 0)], m[s + H4(2 + 0)]); + sum11 = bfdotadd(sum11, n[s + H4(2 + 1)], m[s + H4(2 + 1)]); + + d[s + H4(0 + 0)] = sum00; + d[s + H4(0 + 1)] = sum01; + d[s + H4(2 + 0)] = sum10; + d[s + H4(2 + 1)] = sum11; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} diff --git a/target/arm/translate-neon.c.inc b/target/arm/translate-neon.c.inc index bb0adf4756..7ce65f691f 100644 --- a/target/arm/translate-neon.c.inc +++ b/target/arm/translate-neon.c.inc @@ -4117,3 +4117,12 @@ static bool trans_VUSMMLA(DisasContext *s, arg_VUSMMLA *a) return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, gen_helper_gvec_usmmla_b); } + +static bool trans_VMMLA_b16(DisasContext *s, arg_VMMLA_b16 *a) +{ + if (!dc_isar_feature(aa32_bf16, s)) { + return false; + } + return do_neon_ddda(s, 7, a->vd, a->vn, a->vm, 0, + gen_helper_gvec_bfmmla); +}
This is BFMMLA for both AArch64 AdvSIMD and SVE, and VMMLA.BF16 for AArch32 NEON. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- target/arm/helper.h | 3 +++ target/arm/neon-shared.decode | 2 ++ target/arm/sve.decode | 6 +++-- target/arm/translate-a64.c | 10 +++++++++ target/arm/translate-sve.c | 12 ++++++++++ target/arm/vec_helper.c | 40 +++++++++++++++++++++++++++++++++ target/arm/translate-neon.c.inc | 9 ++++++++ 7 files changed, 80 insertions(+), 2 deletions(-) -- 2.25.1