--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -55,6 +55,7 @@
#define EXCP_LSERR 21 /* v8M LSERR SecureFault */
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
+#define EXCP_VSERR 24 /* virtual SError interrupt */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -88,6 +89,7 @@ enum {
#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_VSERR CPU_INTERRUPT_TGT_INT_0
/* The usual mapping for an AArch64 system register to its AArch32
* counterpart is for the 32 bit world to have access to the lower
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -935,6 +935,14 @@ void arm_cpu_update_virq(ARMCPU *cpu);
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);
+/**
+ * arm_cpu_update_vserr: Update CPU_INTERRUPT_VSERR bit
+ *
+ * Update the CPU_INTERRUPT_VSERR bit in cs->interrupt_request,
+ * following a change to the HCR_EL2.VSE bit.
+ */
+void arm_cpu_update_vserr(ARMCPU *cpu);
+
/**
* arm_mmu_idx_el:
* @env: The cpu environment
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -287,4 +287,9 @@ static inline uint32_t syn_pcalignment(void)
return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}
+static inline uint32_t syn_serror(uint32_t extra)
+{
+ return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra;
+}
+
#endif /* TARGET_ARM_SYNDROME_H */
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -84,7 +84,7 @@ static bool arm_cpu_has_work(CPUState *cs)
return (cpu->power_state != PSCI_OFF)
&& cs->interrupt_request &
(CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
- | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
| CPU_INTERRUPT_EXITTB);
}
@@ -508,6 +508,12 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
return !(env->daif & PSTATE_I);
+ case EXCP_VSERR:
+ if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
+ /* VSErrors are only taken when hypervized. */
+ return false;
+ }
+ return !(env->daif & PSTATE_A);
default:
g_assert_not_reached();
}
@@ -629,6 +635,17 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
goto found;
}
}
+ if (interrupt_request & CPU_INTERRUPT_VSERR) {
+ excp_idx = EXCP_VSERR;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el,
+ cur_el, secure, hcr_el2)) {
+ /* Taking a virtual abort clears HCR_EL2.VSE */
+ env->cp15.hcr_el2 &= ~HCR_VSE;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
+ goto found;
+ }
+ }
return false;
found:
@@ -681,6 +698,25 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
}
}
+void arm_cpu_update_vserr(ARMCPU *cpu)
+{
+ /*
+ * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ bool new_state = env->cp15.hcr_el2 & HCR_VSE;
+
+ if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
+ if (new_state) {
+ cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
+ }
+ }
+}
+
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1873,7 +1873,12 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
- /* External aborts are not possible in QEMU so A bit is always clear */
+ if (hcr_el2 & HCR_AMO) {
+ if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
+ ret |= CPSR_A; /* a virtual SError is pending */
+ }
+ }
+
return ret;
}
@@ -5338,6 +5343,7 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
g_assert(qemu_mutex_iothread_locked());
arm_cpu_update_virq(cpu);
arm_cpu_update_vfiq(cpu);
+ arm_cpu_update_vserr(cpu);
}
static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
@@ -9527,6 +9533,7 @@ void arm_log_exception(CPUState *cs)
[EXCP_LSERR] = "v8M LSERR UsageFault",
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
+ [EXCP_VSERR] = "Virtual SError",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -10039,6 +10046,31 @@ static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
mask = CPSR_A | CPSR_I | CPSR_F;
offset = 4;
break;
+ case EXCP_VSERR:
+ {
+ /*
+ * Note that this is reported as a data abort, but the DFAR
+ * has an UNKNOWN value. Construct the SError syndrome from
+ * AET and ExT fields.
+ */
+ ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, };
+
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = arm_fi_to_lfsc(&fi);
+ } else {
+ env->exception.fsr = arm_fi_to_sfsc(&fi);
+ }
+ env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000;
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+ qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n",
+ env->exception.fsr);
+
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x10;
+ mask = CPSR_A | CPSR_I;
+ offset = 8;
+ }
+ break;
case EXCP_SMC:
new_mode = ARM_CPU_MODE_MON;
addr = 0x08;
@@ -10259,6 +10291,12 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
case EXCP_VFIQ:
addr += 0x100;
break;
+ case EXCP_VSERR:
+ addr += 0x180;
+ /* Construct the SError syndrome from IDS and ISS fields. */
+ env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff);
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
}
Virtual SError exceptions are raised by setting HCR_EL2.VSE, and are
routed to EL1 just like other virtual exceptions.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Honor EAE for reporting VSERR to aa32.
---
 target/arm/cpu.h       |  2 ++
 target/arm/internals.h |  8 ++++++++
 target/arm/syndrome.h  |  5 +++++
 target/arm/cpu.c       | 38 +++++++++++++++++++++++++++++++++++++-
 target/arm/helper.c    | 40 +++++++++++++++++++++++++++++++++++++++-
 5 files changed, 91 insertions(+), 2 deletions(-)
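
For context, a minimal sketch (illustrative only, not part of this patch)
of how an EL2 hypervisor would drive the mechanism emulated above: write
the intended syndrome to VSESR_EL2, then set HCR_EL2.VSE with HCR_EL2.AMO
routing enabled. The helper name and mask constants below are our own;
the register encodings follow the Arm ARM.

#include <stdint.h>

#define HCR_AMO (1ULL << 5)   /* asynchronous aborts virtualized/routed */
#define HCR_VSE (1ULL << 8)   /* virtual SError pending */

/* Hypothetical EL2 helper: make a virtual SError pending for the guest. */
static inline void inject_vserror(uint64_t syndrome)
{
    uint64_t hcr;

    /*
     * VSESR_EL2 (encoded S3_4_C5_C2_3) supplies the IDS and ISS fields,
     * i.e. bits [24:0] of the ESR_EL1 value the guest will observe;
     * compare syn_serror() above.
     */
    __asm__ volatile("msr s3_4_c5_c2_3, %0" : : "r"(syndrome & 0x1ffffffULL));

    /* Setting HCR_EL2.VSE marks the virtual SError as pending. */
    __asm__ volatile("mrs %0, hcr_el2" : "=r"(hcr));
    __asm__ volatile("msr hcr_el2, %0" : : "r"(hcr | HCR_AMO | HCR_VSE));
    __asm__ volatile("isb");
}

While PSTATE.A masks the exception, the vSError stays pending and (with
AMO set) is visible to the guest in ISR_EL1.A, per the isr_read() change
above; QEMU recomputes the pending state whenever HCR_EL2 is written, via
arm_cpu_update_vserr() in do_hcr_write(). Once PSTATE.A is clear, the
exception is taken to EL1 through the SError vector slot (+0x180 within
the selected vector-table section) and HCR_EL2.VSE is cleared, matching
arm_cpu_exec_interrupt(). The resulting ESR_EL1 is
syn_serror(extra) = (0x2f << 26) | (1 << 25) | extra, i.e. EC=SError,
IL=1, with IDS/ISS taken from VSESR_EL2.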