@@ -460,7 +460,7 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
} else {
fpst = fpstatus_ptr(FPST_FPCR_A32);
}
@@ -527,7 +527,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
}
if (sz == 1) {
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
} else {
fpst = fpstatus_ptr(FPST_FPCR_A32);
}
@@ -1433,7 +1433,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
/*
* Do a half-precision operation. Functionally this is
* the same as do_vfp_3op_sp(), except:
- * - it uses the FPST_FPCR_F16
+ * - it uses the FPST_FPCR_F16_A32
* - it doesn't need the VFP vector handling (fp16 is a
* v8 feature, and in v8 VFP vectors don't exist)
* - it does the aa32_fp16_arith feature test
@@ -1456,7 +1456,7 @@ static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
f0 = tcg_temp_new_i32();
f1 = tcg_temp_new_i32();
fd = tcg_temp_new_i32();
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
vfp_load_reg16(f0, vn);
vfp_load_reg16(f1, vm);
@@ -2122,7 +2122,7 @@ static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
/* VFNMA, VFNMS */
gen_vfp_negh(vd, vd);
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
vfp_store_reg32(vd, a->vd);
return true;
@@ -2424,7 +2424,7 @@ DO_VFP_2OP(VNEG, dp, gen_vfp_negd, aa32_fpdp_v2)
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
- gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_FPCR_F16));
+ gen_helper_vfp_sqrth(vd, vm, fpstatus_ptr(FPST_FPCR_F16_A32));
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
@@ -2706,7 +2706,7 @@ static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
gen_helper_rinth(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2779,7 +2779,7 @@ static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
gen_helper_rinth(tmp, tmp, fpst);
gen_restore_rmode(tcg_rmode, fpst);
@@ -2859,7 +2859,7 @@ static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
tmp = tcg_temp_new_i32();
vfp_load_reg16(tmp, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
gen_helper_rinth_exact(tmp, tmp, fpst);
vfp_store_reg32(tmp, a->vd);
return true;
@@ -2983,7 +2983,7 @@ static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
vm = tcg_temp_new_i32();
vfp_load_reg32(vm, a->vm);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
if (a->s) {
/* i32 -> f16 */
gen_helper_vfp_sitoh(vm, vm, fpst);
@@ -3105,7 +3105,7 @@ static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
vd = tcg_temp_new_i32();
vfp_load_reg32(vd, a->vd);
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
shift = tcg_constant_i32(frac_bits);
/* Switch on op:U:sx bits */
@@ -3273,7 +3273,7 @@ static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
return true;
}
- fpst = fpstatus_ptr(FPST_FPCR_F16);
+ fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
vm = tcg_temp_new_i32();
vfp_load_reg16(vm, a->vm);
In the A32 decoder, use FPST_FPCR_F16_A32 rather than FPST_FPCR_F16.
By doing an automated conversion of the whole file we avoid possibly
using more than one fpst value in a set_rmode/op/restore_rmode
sequence.

Patch created with
 perl -p -i -e 's/FPST_FPCR_F16(?!_)/FPST_FPCR_F16_A32/g' target/arm/tcg/translate-vfp.c

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/tcg/translate-vfp.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
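
For context, the set_rmode/op/restore_rmode pattern the message refers to is
the one visible in the trans_VRINTZ_hp hunk above; a minimal sketch of it
(variable declarations and the usual access checks omitted, so this is not
compilable on its own):

    /*
     * The same fpst is passed to all three calls, so the rounding
     * mode is saved from and restored into the float_status that
     * the operation itself uses.
     */
    fpst = fpstatus_ptr(FPST_FPCR_F16_A32);
    tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);  /* set_rmode */
    gen_helper_rinth(tmp, tmp, fpst);                  /* op */
    gen_restore_rmode(tcg_rmode, fpst);                /* restore_rmode */

Converting every FPST_FPCR_F16 use in the file at once keeps that invariant:
no sequence ends up saving the rounding mode from one fpst and restoring it
into another.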