
[v6,2/4] target-arm: Implement ARMv8 VSEL instruction.

Message ID 52977853.9010104@linaro.org
State Superseded

Commit Message

Will Newton Nov. 28, 2013, 5:07 p.m. UTC
This adds support for the VSEL floating point selection instruction
which was added in ARMv8.

Signed-off-by: Will Newton <will.newton@linaro.org>
---
 target-arm/translate.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 129 insertions(+), 1 deletion(-)

Changes in v6:
 - None
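
For reference, VSEL is a conditional select between two floating point
registers based on the APSR condition flags. A rough sketch of the
semantics (illustrative only, not part of the patch):

    /* VSEL<cond>.F64 Dd, Dn, Dm   (and the .F32 Sd, Sn, Sm form):
     *
     *     Dd = ConditionHolds(<cond>) ? Dn : Dm;
     *
     * where <cond> is one of EQ, VS, GE, GT, selected by bits [21:20]
     * of the instruction word (the "cc" field decoded below).
     */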

Comments

Peter Maydell Nov. 29, 2013, 4:01 p.m. UTC | #1
On 28 November 2013 17:07, Will Newton <will.newton@linaro.org> wrote:
>
> This adds support for the VSEL floating point selection instruction
> which was added in ARMv8.
>
> Signed-off-by: Will Newton <will.newton@linaro.org>
> ---
>  target-arm/translate.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 129 insertions(+), 1 deletion(-)
>
> Changes in v6:
>  - None
>
> diff --git a/target-arm/translate.c b/target-arm/translate.c
> index 5a6c1ea..4e7077e 100644
> --- a/target-arm/translate.c
> +++ b/target-arm/translate.c
> @@ -2614,6 +2614,134 @@ static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
>      return tmp;
>  }
>
> +static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
> +{
> +    uint32_t rd, rn, rm, dp = (insn >> 8) & 1;

dp = extract32(insn, 8, 1);
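
extract32() is QEMU's generic bitfield helper (declared in
include/qemu/bitops.h): extract32(value, start, length) returns the
length-bit field of value starting at bit 'start'. So, for instance,
the two forms here are equivalent:

    dp = (insn >> 8) & 1;          /* open-coded shift-and-mask */
    dp = extract32(insn, 8, 1);    /* the same field via the helper */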

> +
> +    if (!arm_feature(env, ARM_FEATURE_V8)) {
> +        return 1;
> +    }
> +
> +    if (dp) {
> +        VFP_DREG_D(rd, insn);
> +        VFP_DREG_N(rn, insn);
> +        VFP_DREG_M(rm, insn);
> +    } else {
> +        rd = VFP_SREG_D(insn);
> +        rn = VFP_SREG_N(insn);
> +        rm = VFP_SREG_M(insn);
> +    }
> +
> +    if ((insn & 0x0f800e50) == 0x0e000a00) {
> +        /* vsel */

Can we call out to a disas_vsel() helper to do the actual
implementation here, please? It's pretty hard to read
if ... else if ... else ladders when there are a hundred
lines of code in each section.

(The same applies to the vmaxnm/vminnm code you add in this
function in a later patch.)
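
One possible shape for that refactoring, sketched here purely for
illustration (the helper name and parameter list are assumptions, not
taken from this patch or a later revision):

    static int disas_vsel(DisasContext *s, uint32_t insn,
                          uint32_t rd, uint32_t rn, uint32_t rm, int dp)
    {
        /* The movcond-based dp/sp selection code from the patch body
         * would move here unchanged.
         */
        return 0;
    }

so that the decode function itself stays a flat ladder:

    if ((insn & 0x0f800e50) == 0x0e000a00) {
        /* vsel */
        return disas_vsel(s, insn, rd, rn, rm, dp);
    }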

> +        uint32_t cc = (insn >> 20) & 3;

cc = extract32(insn, 20, 2);

> +
> +        if (dp) {
> +            TCGv_i64 ftmp1, ftmp2, ftmp3;
> +            TCGv_i64 tmp, zero, zf, nf, vf;
> +
> +            zero = tcg_const_i64(0);
> +
> +            ftmp1 = tcg_temp_new_i64();
> +            ftmp2 = tcg_temp_new_i64();
> +            ftmp3 = tcg_temp_new_i64();
> +
> +            zf = tcg_temp_new_i64();
> +            nf = tcg_temp_new_i64();
> +            vf = tcg_temp_new_i64();
> +
> +            tcg_gen_extu_i32_i64(zf, cpu_ZF);
> +            tcg_gen_extu_i32_i64(nf, cpu_NF);
> +            tcg_gen_extu_i32_i64(vf, cpu_VF);
> +
> +            tcg_gen_ld_f64(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
> +            tcg_gen_ld_f64(ftmp2, cpu_env, vfp_reg_offset(dp, rm));

"frn" and "frm" or something would be more helpful names
than "ftmp1" and "ftmp2"...

> +            switch (cc) {
> +            case 0: /* eq: Z */
> +                tcg_gen_movcond_i64(TCG_COND_EQ, ftmp3, zf, zero,
> +                                    ftmp1, ftmp2);
> +                break;
> +            case 1: /* vs: V */
> +                tcg_gen_movcond_i64(TCG_COND_LT, ftmp3, vf, zero,
> +                                    ftmp1, ftmp2);
> +                break;
> +            case 2: /* ge: N == V -> N ^ V == 0 */
> +                tmp = tcg_temp_new_i64();
> +                tcg_gen_xor_i64(tmp, vf, nf);
> +                tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
> +                                    ftmp1, ftmp2);

The combination of this GE plus the zero-extend earlier
looks pretty clearly wrong. cpu_NF and cpu_VF are
both defined (as per the comments in target-arm/cpu.h) as
"bit 31 is the flag bit, all other bits undefined";
this is why we use a COND_GE rather than a COND_EQ, but
a 64-bit GE is testing bit 63, not bit 31. Using
sign-extension rather than zero-extension to create your
64-bit versions of the flags should fix this.

Similar remarks apply in the 32-bit half of this if().
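
A minimal sketch of that fix, assuming only the change described above
(the final revision may well differ): sign-extend the flag values so
that bit 31 of cpu_NF/cpu_VF becomes bit 63 of the 64-bit copies that
the COND_GE/COND_LT tests then look at. cpu_ZF is only ever compared
for equality against zero here, so either extension works for it:

    tcg_gen_extu_i32_i64(zf, cpu_ZF); /* EQ/NE vs 0: extension choice irrelevant */
    tcg_gen_ext_i32_i64(nf, cpu_NF);  /* sign-extend: bit 31 -> bit 63 */
    tcg_gen_ext_i32_i64(vf, cpu_VF);  /* sign-extend: bit 31 -> bit 63 */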

thanks
-- PMM

Patch

diff --git a/target-arm/translate.c b/target-arm/translate.c
index 5a6c1ea..4e7077e 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -2614,6 +2614,134 @@  static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
     return tmp;
 }

+static int disas_vfp_v8_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
+{
+    uint32_t rd, rn, rm, dp = (insn >> 8) & 1;
+
+    if (!arm_feature(env, ARM_FEATURE_V8)) {
+        return 1;
+    }
+
+    if (dp) {
+        VFP_DREG_D(rd, insn);
+        VFP_DREG_N(rn, insn);
+        VFP_DREG_M(rm, insn);
+    } else {
+        rd = VFP_SREG_D(insn);
+        rn = VFP_SREG_N(insn);
+        rm = VFP_SREG_M(insn);
+    }
+
+    if ((insn & 0x0f800e50) == 0x0e000a00) {
+        /* vsel */
+        uint32_t cc = (insn >> 20) & 3;
+
+        if (dp) {
+            TCGv_i64 ftmp1, ftmp2, ftmp3;
+            TCGv_i64 tmp, zero, zf, nf, vf;
+
+            zero = tcg_const_i64(0);
+
+            ftmp1 = tcg_temp_new_i64();
+            ftmp2 = tcg_temp_new_i64();
+            ftmp3 = tcg_temp_new_i64();
+
+            zf = tcg_temp_new_i64();
+            nf = tcg_temp_new_i64();
+            vf = tcg_temp_new_i64();
+
+            tcg_gen_extu_i32_i64(zf, cpu_ZF);
+            tcg_gen_extu_i32_i64(nf, cpu_NF);
+            tcg_gen_extu_i32_i64(vf, cpu_VF);
+
+            tcg_gen_ld_f64(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+            tcg_gen_ld_f64(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+            switch (cc) {
+            case 0: /* eq: Z */
+                tcg_gen_movcond_i64(TCG_COND_EQ, ftmp3, zf, zero,
+                                    ftmp1, ftmp2);
+                break;
+            case 1: /* vs: V */
+                tcg_gen_movcond_i64(TCG_COND_LT, ftmp3, vf, zero,
+                                    ftmp1, ftmp2);
+                break;
+            case 2: /* ge: N == V -> N ^ V == 0 */
+                tmp = tcg_temp_new_i64();
+                tcg_gen_xor_i64(tmp, vf, nf);
+                tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+                                    ftmp1, ftmp2);
+                tcg_temp_free_i64(tmp);
+                break;
+            case 3: /* gt: !Z && N == V */
+                tcg_gen_movcond_i64(TCG_COND_NE, ftmp3, zf, zero,
+                                    ftmp1, ftmp2);
+                tmp = tcg_temp_new_i64();
+                tcg_gen_xor_i64(tmp, vf, nf);
+                tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+                                    ftmp3, ftmp2);
+                tcg_temp_free_i64(tmp);
+                break;
+            }
+            tcg_gen_st_f64(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+            tcg_temp_free_i64(ftmp1);
+            tcg_temp_free_i64(ftmp2);
+            tcg_temp_free_i64(ftmp3);
+
+            tcg_temp_free_i64(zf);
+            tcg_temp_free_i64(nf);
+            tcg_temp_free_i64(vf);
+
+            tcg_temp_free_i64(zero);
+        } else {
+            TCGv_i32 ftmp1, ftmp2, ftmp3;
+            TCGv_i32 tmp, zero;
+
+            zero = tcg_const_i32(0);
+
+            ftmp1 = tcg_temp_new_i32();
+            ftmp2 = tcg_temp_new_i32();
+            ftmp3 = tcg_temp_new_i32();
+            tcg_gen_ld_f32(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+            tcg_gen_ld_f32(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+            switch (cc) {
+            case 0: /* eq: Z */
+                tcg_gen_movcond_i32(TCG_COND_EQ, ftmp3, cpu_ZF, zero,
+                                    ftmp1, ftmp2);
+                break;
+            case 1: /* vs: V */
+                tcg_gen_movcond_i32(TCG_COND_LT, ftmp3, cpu_VF, zero,
+                                    ftmp1, ftmp2);
+                break;
+            case 2: /* ge: N == V -> N ^ V == 0 */
+                tmp = tcg_temp_new_i32();
+                tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+                tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+                                    ftmp1, ftmp2);
+                tcg_temp_free_i32(tmp);
+                break;
+            case 3: /* gt: !Z && N == V */
+                tcg_gen_movcond_i32(TCG_COND_NE, ftmp3, cpu_ZF, zero,
+                                    ftmp1, ftmp2);
+                tmp = tcg_temp_new_i32();
+                tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+                tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+                                    ftmp3, ftmp2);
+                tcg_temp_free_i32(tmp);
+                break;
+            }
+            tcg_gen_st_f32(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+            tcg_temp_free_i32(ftmp1);
+            tcg_temp_free_i32(ftmp2);
+            tcg_temp_free_i32(ftmp3);
+
+            tcg_temp_free_i32(zero);
+        }
+
+        return 0;
+    }
+    return 1;
+}
+
 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
    (ie. an undefined instruction).  */
 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
@@ -2640,7 +2768,7 @@  static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
     if (extract32(insn, 28, 4) == 0xf) {
         /* Encodings with T=1 (Thumb) or unconditional (ARM):
            only used in v8 and above.  */
-        return 1;
+        return disas_vfp_v8_insn(env, s, insn);
     }

     dp = ((insn & 0xf00) == 0xb00);