@@ -57,6 +57,7 @@
#define EXCP_UNALIGNED 22 /* v7M UNALIGNED UsageFault */
#define EXCP_DIVBYZERO 23 /* v7M DIVBYZERO UsageFault */
#define EXCP_VSERR 24
+#define EXCP_GPC 25 /* v9 Granule Protection Check Fault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -352,14 +352,27 @@ typedef enum ARMFaultType {
ARMFault_ICacheMaint,
ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
+ ARMFault_GPCFOnWalk,
+ ARMFault_GPCFOnOutput,
} ARMFaultType;
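+/*
+ * Subtype of a Granule Protection Check fault: no fault, a GPT address
+ * size fault, a fault encountered during the GPT walk itself, an external
+ * abort on the GPT fetch, or a failed check (the access is not permitted
+ * by the GPT entry).
+ */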
+typedef enum ARMGPCF {
+ GPCF_None,
+ GPCF_AddressSize,
+ GPCF_Walk,
+ GPCF_EABT,
+ GPCF_Fail,
+} ARMGPCF;
+
/**
* ARMMMUFaultInfo: Information describing an ARM MMU Fault
* @type: Type of fault
+ * @gpcf: Subtype of ARMFault_GPCFOn{Walk,Output}.
* @level: Table walk level (for translation, access flag and permission faults)
* @domain: Domain of the fault address (for non-LPAE CPUs only)
* @s2addr: Address that caused a fault at stage 2
+ * @paddr: physical address that caused the fault, for GPC faults
+ * @paddr_space: physical address space of @paddr, for GPC faults
* @stage2: True if we faulted at stage 2
* @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
* @s1ns: True if we faulted on a non-secure IPA while in secure state
@@ -368,7 +381,10 @@ typedef enum ARMFaultType {
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
ARMFaultType type;
+ ARMGPCF gpcf;
target_ulong s2addr;
+ target_ulong paddr;
+ ARMSecuritySpace paddr_space;
int level;
int domain;
bool stage2;
@@ -542,6 +558,17 @@ static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
case ARMFault_Exclusive:
fsc = 0x35;
break;
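+ /*
+ * These are the long-descriptor FSC encodings for a granule protection
+ * fault: 0b1001ll for a GPF on the translation table walk at level ll
+ * (0b100011 for the level -1 lookup), and 0b101000 for a GPF that is
+ * not on a translation table walk.
+ */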
+ case ARMFault_GPCFOnWalk:
+ assert(fi->level >= -1 && fi->level <= 3);
+ if (fi->level < 0) {
+ fsc = 0b100011;
+ } else {
+ fsc = 0b100100 | fi->level;
+ }
+ break;
+ case ARMFault_GPCFOnOutput:
+ fsc = 0b101000;
+ break;
default:
/* Other faults can't occur in a context that requires a
* long-format status code.
@@ -10214,6 +10214,7 @@ void arm_log_exception(CPUState *cs)
[EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault",
[EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault",
[EXCP_VSERR] = "Virtual SERR",
+ [EXCP_GPC] = "Granule Protection Check",
};
if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
@@ -10945,6 +10946,10 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
}
switch (cs->exception_index) {
+ case EXCP_GPC:
+ qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n",
+ env->cp15.mfar_el3);
+ /* fall through */
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
/*
@@ -91,17 +91,106 @@ static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
return fsr;
}
+static bool report_as_gpc_exception(ARMCPU *cpu, int current_el,
+ ARMMMUFaultInfo *fi)
+{
+ bool ret;
+
+ switch (fi->gpcf) {
+ case GPCF_None:
+ return false;
+ case GPCF_AddressSize:
+ case GPCF_Walk:
+ case GPCF_EABT:
+ /* R_PYTGX: GPT faults are reported as GPC. */
+ ret = true;
+ break;
+ case GPCF_Fail:
+ /*
+ * R_BLYPM: A GPF at EL3 is reported as insn or data abort.
+ * R_VBZMW, R_LXHQR: A GPF at EL[0-2] is reported as a GPC
+ * if SCR_EL3.GPF is set, otherwise an insn or data abort.
+ */
+ ret = (cpu->env.cp15.scr_el3 & SCR_GPF) && current_el != 3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
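+ /* Validate that the fault really did come from a granule protection check. */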
+ assert(cpu_isar_feature(aa64_rme, cpu));
+ assert(fi->type == ARMFault_GPCFOnWalk ||
+ fi->type == ARMFault_GPCFOnOutput);
+ if (fi->gpcf == GPCF_AddressSize) {
+ assert(fi->level == 0);
+ } else {
+ assert(fi->level >= 0 && fi->level <= 1);
+ }
+
+ return ret;
+}
+
+static unsigned encode_gpcsc(ARMMMUFaultInfo *fi)
+{
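+ /*
+ * The GPCSC syndrome field encodes the class of the failure in its upper
+ * bits and the GPT level in its low bits, so the table below holds the
+ * level 0 encoding for each class and the level is ORed in afterwards.
+ */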
+ static uint8_t const gpcsc[] = {
+ [GPCF_AddressSize] = 0b000000,
+ [GPCF_Walk] = 0b000100,
+ [GPCF_Fail] = 0b001100,
+ [GPCF_EABT] = 0b010100,
+ };
+
+ /* Note that we've validated fi->gpcf and fi->level above. */
+ return gpcsc[fi->gpcf] | fi->level;
+}
+
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
MMUAccessType access_type,
int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
- int target_el;
+ int target_el = exception_target_el(env);
+ int current_el = arm_current_el(env);
bool same_el;
uint32_t syn, exc, fsr, fsc;
- target_el = exception_target_el(env);
+ if (report_as_gpc_exception(cpu, current_el, fi)) {
+ target_el = 3;
+
+ fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
+
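+ /*
+ * The syndrome records whether the fault hit a stage 1 or stage 2
+ * translation table walk, whether the access was an instruction fetch
+ * or a store, the GPCSC class/level code and the long-format FSC.
+ */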
+ syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk,
+ access_type == MMU_INST_FETCH,
+ encode_gpcsc(fi), 0, fi->s1ptw,
+ access_type == MMU_DATA_STORE, fsc);
+
+ env->cp15.mfar_el3 = fi->paddr;
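+ /*
+ * MFAR_EL3 holds the faulting physical address; its NS and NSE bits
+ * encode the physical address space of that address: {NSE,NS} =
+ * {0,0} Secure, {0,1} Non-secure, {1,0} Root, {1,1} Realm.
+ */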
+ switch (fi->paddr_space) {
+ case ARMSS_Secure:
+ break;
+ case ARMSS_NonSecure:
+ env->cp15.mfar_el3 |= R_MFAR_NS_MASK;
+ break;
+ case ARMSS_Root:
+ env->cp15.mfar_el3 |= R_MFAR_NSE_MASK;
+ break;
+ case ARMSS_Realm:
+ env->cp15.mfar_el3 |= R_MFAR_NSE_MASK | R_MFAR_NS_MASK;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ exc = EXCP_GPC;
+ goto do_raise;
+ }
+
+ /* If SCR_EL3.GPF is unset, a GPF may still be routed to EL2 by HCR_EL2.GPF. */
+ if (fi->gpcf == GPCF_Fail && target_el < 2) {
+ if (arm_hcr_el2_eff(env) & HCR_GPF) {
+ target_el = 2;
+ }
+ }
+
if (fi->stage2) {
target_el = 2;
env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
@@ -109,8 +198,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
env->cp15.hpfar_el2 |= HPFAR_NS;
}
}
- same_el = (arm_current_el(env) == target_el);
+ same_el = current_el == target_el;
fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);
if (access_type == MMU_INST_FETCH) {
@@ -128,6 +217,7 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
exc = EXCP_DATA_ABORT;
}
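+ /* The GPC path above jumps here with fsr, syn and exc already set. */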
+ do_raise:
env->exception.vaddress = addr;
env->exception.fsr = fsr;
raise_exception(env, exc, syn, target_el);