--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -47,6 +47,7 @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
DEF_HELPER_1(setend, void, env)
DEF_HELPER_2(wfi, void, env, i32)
DEF_HELPER_1(wfe, void, env)
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -282,4 +282,9 @@ static inline uint32_t syn_illegalstate(void)
return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

+static inline uint32_t syn_pcalignment(void)
+{
+ return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
#endif /* TARGET_ARM_SYNDROME_H */
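
As a stand-alone illustration (not part of the patch), here is how the syndrome built by syn_pcalignment() decodes. The constant values are assumptions mirroring the usual target/arm definitions (EC in ESR bits [31:26], IL in bit 25, EC_PCALIGNMENT == 0x22) and should be checked against the tree:

    /* Sketch only: decode the PC-alignment syndrome word.
     * The #defines below are assumed values, not taken from this patch.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ARM_EL_EC_SHIFT 26
    #define ARM_EL_IL       (1u << 25)
    #define EC_PCALIGNMENT  0x22u

    int main(void)
    {
        uint32_t syn = (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
        /* Expected output: syndrome 0x8a000000: EC=0x22 IL=1 */
        printf("syndrome 0x%08x: EC=0x%02x IL=%u\n", (unsigned)syn,
               (unsigned)(syn >> ARM_EL_EC_SHIFT),
               (unsigned)((syn >> 25) & 1));
        return 0;
    }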
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -9,6 +9,7 @@
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
+#include "exec/helper-proto.h"

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
@@ -123,6 +124,29 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

+void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+{
+ int target_el = exception_target_el(env);
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
+ /*
+ * To AArch64, and to AArch32 EL2, a PC alignment fault has a
+ * special exception class.
+ */
+ env->exception.vaddress = pc;
+ env->exception.fsr = 0;
+ raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
+ } else {
+ /*
+ * To AArch32 EL1, a PC alignment fault is reported like a data
+ * alignment fault, except that it arrives as a prefetch abort.
+ */
+ ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
+ arm_deliver_fault(env_archcpu(env), pc, MMU_INST_FETCH,
+ cpu_mmu_index(env, true), &fi);
+ }
+}
+
#if !defined(CONFIG_USER_ONLY)
/*
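
As a concrete (and hypothetical) way to exercise this helper, a linux-user guest can take an indirect call to a misaligned address. The sketch below assumes that Linux delivers the resulting fault as SIGBUS; that signal choice is a kernel-ABI assumption, not something this patch defines:

    /* Hypothetical qemu-aarch64 linux-user test, not part of the patch. */
    #include <signal.h>
    #include <stdint.h>
    #include <unistd.h>

    static void handler(int sig)
    {
        /* write() is async-signal-safe; expecting SIGBUS is an assumption. */
        static const char msg[] = "fault on misaligned pc\n";
        (void)sig;
        (void)write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(0);
    }

    int main(void)
    {
        signal(SIGBUS, handler);
        /* Indirect branch (BLR) to main+2, i.e. pc % 4 == 2. */
        void (*fn)(void) = (void (*)(void))((uintptr_t)main + 2);
        fn();
        return 1; /* not reached if the alignment fault is delivered */
    }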
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14752,8 +14752,10 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *s = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
+ uint64_t pc = s->base.pc_next;
uint32_t insn;

+ /* Singlestep exceptions have the highest priority. */
if (s->ss_active && !s->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
@@ -14768,13 +14770,28 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
assert(s->base.num_insns == 1);
gen_swstep_exception(s, 0, 0);
s->base.is_jmp = DISAS_NORETURN;
+ s->base.pc_next = pc + 4;
return;
}

- s->pc_curr = s->base.pc_next;
- insn = arm_ldl_code(env, &s->base, s->base.pc_next, s->sctlr_b);
+ if (pc & 3) {
+ /*
+ * PC alignment fault. This has priority over the instruction abort
+ * that we would receive from a translation fault via arm_ldl_code.
+ * This should only be possible after an indirect branch, at the
+ * start of the TB.
+ */
+ assert(s->base.num_insns == 1);
+ gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ s->base.is_jmp = DISAS_NORETURN;
+ s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+ return;
+ }
+
+ s->pc_curr = pc;
+ insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
s->insn = insn;
- s->base.pc_next += 4;
+ s->base.pc_next = pc + 4;

s->fp_access_checked = false;
s->sve_access_checked = false;
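
For context on the assert above: every A64 direct branch target is aligned by the instruction encoding, so a misaligned pc can only arrive via an indirect branch (BR/BLR/RET or an exception return), and therefore only on the first instruction of a TB. A hypothetical guest fragment that would exercise the new path (GNU inline asm assumed):

    /* Hypothetical A64 guest code, not part of the patch: BR to an
     * address with pc % 4 == 2; the fault is raised when fetching at
     * the branch target, before any instruction there executes.
     */
    static void __attribute__((noinline)) jump_misaligned(void)
    {
        unsigned long target = (unsigned long)&jump_misaligned + 2;
        asm volatile("br %0" : : "r"(target));
        __builtin_unreachable();
    }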
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9497,7 +9497,7 @@ static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
dc->insn_start = tcg_last_op();
}

-static bool arm_pre_translate_insn(DisasContext *dc)
+static bool arm_check_kernelpage(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
@@ -9509,7 +9509,11 @@ static bool arm_pre_translate_insn(DisasContext *dc)
return true;
}
#endif
+ return false;
+}

+static bool arm_check_ss_active(DisasContext *dc)
+{
if (dc->ss_active && !dc->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
@@ -9543,17 +9547,38 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
CPUARMState *env = cpu->env_ptr;
+ uint32_t pc = dc->base.pc_next;
unsigned int insn;

- if (arm_pre_translate_insn(dc)) {
- dc->base.pc_next += 4;
+ /* Singlestep exceptions have the highest priority. */
+ if (arm_check_ss_active(dc)) {
+ dc->base.pc_next = pc + 4;
return;
}

- dc->pc_curr = dc->base.pc_next;
- insn = arm_ldl_code(env, &dc->base, dc->base.pc_next, dc->sctlr_b);
+ if (pc & 3) {
+ /*
+ * PC alignment fault. This has priority over the instruction abort
+ * that we would receive from a translation fault via arm_ldl_code
+ * (or the execution of the kernelpage entrypoint). This should only
+ * be possible after an indirect branch, at the start of the TB.
+ */
+ assert(dc->base.num_insns == 1);
+ gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ dc->base.is_jmp = DISAS_NORETURN;
+ dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+ return;
+ }
+
+ if (arm_check_kernelpage(dc)) {
+ dc->base.pc_next = pc + 4;
+ return;
+ }
+
+ dc->pc_curr = pc;
+ insn = arm_ldl_code(env, &dc->base, pc, dc->sctlr_b);
dc->insn = insn;
- dc->base.pc_next += 4;
+ dc->base.pc_next = pc + 4;
disas_arm_insn(dc, insn);

arm_post_translate_insn(dc);
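
The A32 case that motivates this check is interworking: roughly, BXWritePC selects Thumb state when bit 0 of the target is set, but with bit 0 clear it branches to ARM state without forcing bit 1 clear, so the PC can end up with pc % 4 == 2 (the CONSTRAINED UNPREDICTABLE case the commit message discusses). A hypothetical guest fragment (GNU inline asm assumed):

    /* Hypothetical A32 guest code, not part of the patch: BX to an
     * address with bit 1 set and bit 0 clear stays in ARM state but
     * leaves the PC misaligned, taking the new exception path.
     */
    static void __attribute__((noinline)) bx_misaligned(void)
    {
        unsigned long target = ((unsigned long)&bx_misaligned + 2) & ~1ul;
        asm volatile("bx %0" : : "r"(target));
        __builtin_unreachable();
    }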
@@ -9615,7 +9640,7 @@ static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
uint32_t insn;
bool is_16bit;

- if (arm_pre_translate_insn(dc)) {
+ if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
dc->base.pc_next += 2;
return;
}
For A64, any input to an indirect branch can cause this.

For A32, many indirect branch paths force the branch to be aligned,
but BXWritePC does not.  This includes the BX instruction but also
other interworking changes to PC.  Prior to v8, this case is
UNDEFINED.  With v8, this is CONSTRAINED UNPREDICTABLE and may either
raise an exception or force align the PC.

We choose to raise an exception because we have the infrastructure,
it makes the generated code for gen_bx simpler, and it has the
possibility of catching more guest bugs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.h        |  1 +
 target/arm/syndrome.h      |  5 +++++
 target/arm/tlb_helper.c    | 24 +++++++++++++++++++++++
 target/arm/translate-a64.c | 23 +++++++++++++++++++---
 target/arm/translate.c     | 39 +++++++++++++++++++++++++++++++-------
 5 files changed, 82 insertions(+), 10 deletions(-)

--
2.25.1