@@ -1741,6 +1741,9 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
+/* Target EL if we take a floating-point-disabled exception */
+#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
+#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)
/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
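
(Illustration, not part of the patch: a minimal standalone sketch of how a target exception level in the range 0-3 round-trips through the new two-bit FPEXC_EL field; it assumes only the shift, mask and accessor values that this patch defines.)

    #include <assert.h>
    #include <stdint.h>

    #define ARM_TBFLAG_FPEXC_EL_SHIFT 24
    #define ARM_TBFLAG_FPEXC_EL_MASK  (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)
    #define ARM_TBFLAG_FPEXC_EL(F) \
        (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)

    int main(void)
    {
        uint32_t tb_flags = 0;
        int target_el = 2;   /* any of EL0..EL3 fits in the two-bit field */

        /* pack, as cpu_get_tb_cpu_state() does when the TB flags are built */
        tb_flags |= (uint32_t)target_el << ARM_TBFLAG_FPEXC_EL_SHIFT;

        /* unpack, as the translators do when filling in DisasContext */
        assert(ARM_TBFLAG_FPEXC_EL(tb_flags) == 2);
        return 0;
    }
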
@@ -1755,8 +1758,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
-#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
-#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
* checks on the other bits at runtime
*/
@@ -1769,9 +1770,7 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_NS_SHIFT 22
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
-/* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
-#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
+/* Bit usage when in AArch64 state: currently we have no A64 specific bits */
/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
@@ -1782,6 +1781,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_FPEXC_EL(F) \
+    (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
@@ -1794,33 +1795,82 @@ static inline bool arm_singlestep_active(CPUARMState *env)
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_BSWAP_CODE(F) \
    (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
-#define ARM_TBFLAG_CPACR_FPEN(F) \
-    (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_FPEN(F) \
-    (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
#define ARM_TBFLAG_NS(F) \
    (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)

-static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                                        target_ulong *cs_base, int *flags)
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+static inline int fp_exception_el(CPUARMState *env)
{
    int fpen;
+    int cur_el = arm_current_el(env);

-    if (arm_feature(env, ARM_FEATURE_V6)) {
-        fpen = extract32(env->cp15.cpacr_el1, 20, 2);
-    } else {
-        /* CPACR doesn't exist before v6, so VFP is always accessible */
-        fpen = 3;
+    /* CPACR and the CPTR registers don't exist before v6, so FP is
+     * always accessible
+     */
+    if (!arm_feature(env, ARM_FEATURE_V6)) {
+        return 0;
+    }
+
+    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
+     * 0, 2 : trap EL0 and EL1/PL1 accesses
+     * 1    : trap only EL0 accesses
+     * 3    : trap no accesses
+     */
+    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+    switch (fpen) {
+    case 0:
+    case 2:
+        if (cur_el == 0 || cur_el == 1) {
+            /* Trap to PL1, which might be EL1 or EL3 */
+            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+                return 3;
+            }
+            return 1;
+        }
+        if (cur_el == 3 && !is_a64(env)) {
+            /* Secure PL1 running at EL3 */
+            return 3;
+        }
+        break;
+    case 1:
+        if (cur_el == 0) {
+            return 1;
+        }
+        break;
+    case 3:
+        break;
+    }
+
+    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
+     * check because zero bits in the registers mean "don't trap".
+     */
+
+    /* CPTR_EL2 : present in v7VE or v8 */
+    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
+        && !arm_is_secure_below_el3(env)) {
+        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
+        return 2;
+    }
+
+    /* CPTR_EL3 : present in v8 */
+    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
+        /* Trap all FP ops to EL3 */
+        return 3;
    }

+    return 0;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *flags)
+{
    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
-        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
-            *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
-        }
    } else {
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
@@ -1835,9 +1885,6 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
-        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
-            *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
-        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }
@@ -1862,6 +1909,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
            }
        }
    }
+    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;

    *cs_base = 0;
}
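
(Illustration, not part of the patch: a standalone sketch that mirrors only the CPACR.FPEN portion of fp_exception_el() above; the secure-PL1, CPTR_EL2 and CPTR_EL3 cases are omitted. It shows how an FPEN setting maps to a trap target EL, and is not QEMU code.)

    #include <assert.h>
    #include <stdint.h>

    /* Same semantics as QEMU's extract32(): 'len' bits starting at bit 'start' */
    static uint32_t extract_bits(uint32_t value, int start, int len)
    {
        return (value >> start) & ((1u << len) - 1);
    }

    /* CPACR.FPEN decode only: return the EL an FP-disabled exception targets,
     * or 0 if FP is enabled at cur_el (EL2/EL3 trap bits ignored here).
     */
    static int cpacr_fp_exception_el(uint32_t cpacr_el1, int cur_el)
    {
        switch (extract_bits(cpacr_el1, 20, 2)) {  /* CPACR_EL1.FPEN */
        case 0:
        case 2:
            return (cur_el == 0 || cur_el == 1) ? 1 : 0;
        case 1:
            return (cur_el == 0) ? 1 : 0;
        default:                                   /* 3: no traps */
            return 0;
        }
    }

    int main(void)
    {
        assert(cpacr_fp_exception_el(0x0u << 20, 0) == 1); /* FPEN=0: EL0 use traps to EL1 */
        assert(cpacr_fp_exception_el(0x1u << 20, 1) == 0); /* FPEN=1: EL1 use is allowed */
        assert(cpacr_fp_exception_el(0x3u << 20, 0) == 0); /* FPEN=3: no traps at all */
        return 0;
    }
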
@@ -412,7 +412,7 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
static inline void assert_fp_access_checked(DisasContext *s)
{
#ifdef CONFIG_DEBUG_TCG
-    if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) {
+    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
        fprintf(stderr, "target-arm: FP access check missing for "
                "instruction 0x%08x\n", s->insn);
        abort();
@@ -972,12 +972,12 @@ static inline bool fp_access_check(DisasContext *s)
    assert(!s->fp_access_checked);
    s->fp_access_checked = true;

-    if (s->cpacr_fpen) {
+    if (!s->fp_excp_el) {
        return true;
    }

    gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
-                       default_exception_el(s));
+                       s->fp_excp_el);
    return false;
}
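
(Illustration, not part of the patch: a self-contained sketch of the calling convention the reworked fp_access_check() gives A64 decoders — check once, and if it returns false the trap to fp_excp_el has already been generated, so the decoder emits nothing. The struct and function below are reduced stand-ins, not QEMU code.)

    #include <stdbool.h>
    #include <stdio.h>

    /* Reduced stand-in for the DisasContext fields involved in the check */
    typedef struct {
        bool fp_access_checked;
        int fp_excp_el;        /* FP exception EL, or 0 if FP is enabled */
    } DisasContextSketch;

    /* Mirrors the shape of fp_access_check() above: true means "emit the FP op",
     * false means an FP-disabled exception routed to fp_excp_el was generated.
     */
    static bool fp_access_check_sketch(DisasContextSketch *s)
    {
        s->fp_access_checked = true;
        if (!s->fp_excp_el) {
            return true;
        }
        printf("raise EXCP_UDEF with FP-access syndrome, target EL%d\n",
               s->fp_excp_el);
        return false;
    }

    int main(void)
    {
        DisasContextSketch fp_on  = { .fp_access_checked = false, .fp_excp_el = 0 };
        DisasContextSketch fp_off = { .fp_access_checked = false, .fp_excp_el = 2 };

        if (fp_access_check_sketch(&fp_on)) {
            /* a decoder would emit the translated FP/SIMD operation here */
        }
        if (!fp_access_check_sketch(&fp_off)) {
            /* decoder returns without emitting any code for the instruction */
        }
        return 0;
    }
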
@@ -10954,7 +10954,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
-    dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vec_len = 0;
    dc->vec_stride = 0;
    dc->cp_regs = cpu->cp_regs;
@@ -3044,10 +3044,9 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }
@@ -4363,10 +4362,9 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }
@@ -5102,10 +5100,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
        gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
        return 0;
    }
@@ -11082,7 +11079,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
    dc->user = (dc->current_el == 0);
#endif
    dc->ns = ARM_TBFLAG_NS(tb->flags);
-    dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
@@ -22,7 +22,7 @@ typedef struct DisasContext {
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    bool ns; /* Use non-secure CPREG bank on access */
-    bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
+    int fp_excp_el; /* FP exception EL or 0 if enabled */
    bool el3_is_aa64; /* Flag indicating whether EL3 is AArch64 or not */
    bool vfp_enabled; /* FP enabled via FPSCR.EN */
    int vec_len;