@@ -194,6 +194,10 @@ FIELD(GICR_VPENDBASER, VALID, 63, 1)
#define ICC_CTLR_EL3_A3V (1U << 15)
#define ICC_CTLR_EL3_NDS (1U << 17)
+#define ICC_AP1R_EL1_NMI (1ULL << 63)
+#define ICC_RPR_EL1_NSNMI (1ULL << 62)
+#define ICC_RPR_EL1_NMI (1ULL << 63)
+
#define ICH_VMCR_EL2_VENG0_SHIFT 0
#define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
#define ICH_VMCR_EL2_VENG1_SHIFT 1
@@ -511,6 +515,7 @@ FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)
/* Special interrupt IDs */
#define INTID_SECURE 1020
#define INTID_NONSECURE 1021
+#define INTID_NMI 1022
#define INTID_SPURIOUS 1023
/* Functions internal to the emulated GICv3 */
@@ -225,6 +225,13 @@ struct GICv3CPUState {
/* This is temporary working state, to avoid a malloc in gicv3_update() */
bool seenbetter;
+
+ /*
+ * Whether the CPU interface has NMI support (FEAT_GICv3_NMI). The
+ * CPU interface may support NMIs even when the GIC proper (what the
+ * spec calls the IRI; the redistributors and distributor) does not.
+ */
+ bool nmi_support;
};
/*
@@ -21,6 +21,7 @@
#include "hw/irq.h"
#include "cpu.h"
#include "target/arm/cpregs.h"
+#include "target/arm/cpu-features.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
@@ -795,6 +796,13 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
return intid;
}
+static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /*
+ * TODO: NMI acknowledge is not yet implemented for the virtual
+ * CPU interface; report a spurious interrupt for now.
+ */
+ uint64_t intid = INTID_SPURIOUS;
+ return intid;
+}
+
static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
{
/*
@@ -832,6 +840,23 @@ static int icc_highest_active_prio(GICv3CPUState *cs)
*/
int i;
+ if (cs->nmi_support) {
+ /*
+ * If an NMI is active this takes precedence over anything else
+ * for priority purposes; the NMI bit exists only in AP1R0.
+ * Here we return the effective priority of the NMI, which is
+ * either 0x0 or 0x80. Callers will need to check for NMI again,
+ * either to set the RPR register bits or to prioritize NMI
+ * against non-NMI interrupts.
+ */
+ if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+ return 0;
+ }
+ if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+ return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
+ }
+ }
+
for (i = 0; i < icc_num_aprs(cs); i++) {
uint32_t apr = cs->icc_apr[GICV3_G0][i] |
cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
@@ -898,12 +923,24 @@ static bool icc_hppi_can_preempt(GICv3CPUState *cs)
*/
int rprio;
uint32_t mask;
+ ARMCPU *cpu = ARM_CPU(cs->cpu);
+ CPUARMState *env = &cpu->env;
if (icc_no_enabled_hppi(cs)) {
return false;
}
- if (cs->hppi.prio >= cs->icc_pmr_el1) {
+ if (cs->hppi.nmi) {
+ if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
+ cs->hppi.grp == GICV3_G1NS) {
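+ /*
+ * A Non-secure Group 1 NMI can still be masked by PMR when the
+ * GIC has two Security states: a PMR below 0x80 masks it, and
+ * from Secure state a PMR of exactly 0x80 masks it as well,
+ * 0x80 being the Secure view of its effective priority.
+ */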
+ if (cs->icc_pmr_el1 < 0x80) {
+ return false;
+ }
+ if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
+ return false;
+ }
+ }
+ } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
/* Priority mask masks this interrupt */
return false;
}
@@ -923,6 +960,12 @@ static bool icc_hppi_can_preempt(GICv3CPUState *cs)
return true;
}
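+ /*
+ * An NMI may preempt an active non-NMI interrupt of the same
+ * priority, but not another active NMI.
+ */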
+ if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
+ if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
+ return true;
+ }
+ }
+
return false;
}
@@ -1044,8 +1087,13 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
int aprbit = prio >> (8 - cs->prebits);
int regno = aprbit / 32;
int regbit = aprbit % 32;
+ bool nmi = cs->hppi.nmi;
- cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
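+ /* Activating an NMI sets the NMI bit in the APR rather than a priority bit */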
+ if (nmi) {
+ cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
+ } else {
+ cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
+ }
if (irq < GIC_INTERNAL) {
cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
@@ -1159,6 +1207,7 @@ static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
+ int el = arm_current_el(env);
uint64_t intid;
if (icv_access(env, HCR_IMO)) {
@@ -1172,13 +1221,44 @@ static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
if (!gicv3_intid_is_special(intid)) {
- icc_activate_irq(cs, intid);
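+ /*
+ * If superpriority handling is enabled via SCTLR_ELx.NMI, an NMI
+ * must instead be acknowledged via ICC_NMIAR1_EL1: return
+ * INTID_NMI here without activating the interrupt.
+ */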
+ if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
+ intid = INTID_NMI;
+ } else {
+ icc_activate_irq(cs, intid);
+ }
}
trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
return intid;
}
+static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ GICv3CPUState *cs = icc_cs_from_env(env);
+ uint64_t intid;
+
+ if (icv_access(env, HCR_IMO)) {
+ return icv_nmiar1_read(env, ri);
+ }
+
+ if (!icc_hppi_can_preempt(cs)) {
+ intid = INTID_SPURIOUS;
+ } else {
+ intid = icc_hppir1_value(cs, env);
+ }
+
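+ /*
+ * ICC_NMIAR1_EL1 acknowledges only interrupts with superpriority:
+ * if the highest priority pending interrupt is not an NMI, return
+ * a spurious INTID rather than activating it.
+ */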
+ if (!gicv3_intid_is_special(intid)) {
+ if (!cs->hppi.nmi) {
+ intid = INTID_SPURIOUS;
+ } else {
+ icc_activate_irq(cs, intid);
+ }
+ }
+
+ trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
+ return intid;
+}
+
static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
/* Drop the priority of the currently active interrupt in
@@ -1205,6 +1285,12 @@ static void icc_drop_prio(GICv3CPUState *cs, int grp)
if (!*papr) {
continue;
}
+
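+ /*
+ * If an NMI is active, the priority drop clears only the NMI bit
+ * in AP1R0; activating the NMI did not set an ordinary priority bit.
+ */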
+ if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
+ *papr &= (~ICC_AP1R_EL1_NMI);
+ break;
+ }
+
/* Clear the lowest set bit */
*papr &= *papr - 1;
break;
@@ -1239,6 +1325,15 @@ static int icc_highest_active_group(GICv3CPUState *cs)
*/
int i;
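+ /*
+ * The NMI bit exists only in the AP1R0 registers; an active NMI
+ * takes precedence over the ordinary active priority bits.
+ */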
+ if (cs->nmi_support) {
+ if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+ return GICV3_G1;
+ }
+ if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+ return GICV3_G1NS;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
@@ -1693,7 +1788,11 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
- cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
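+ /* With NMI support the NMI bit (bit 63) is writable as well */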
+ if (cs->nmi_support) {
+ cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
+ } else {
+ cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
+ }
gicv3_cpuif_update(cs);
}
@@ -1783,7 +1882,7 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
- int prio;
+ uint64_t prio;
if (icv_access(env, HCR_FMO | HCR_IMO)) {
return icv_rpr_read(env, ri);
@@ -1803,6 +1902,22 @@ static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
+ if (cs->nmi_support) {
+ /* NMI info is reported in the high bits of RPR */
+ if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
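+ /* Non-secure with EL3: only the NS NMI is visible, reported in bit 63 */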
+ if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+ prio |= ICC_RPR_EL1_NMI;
+ }
+ } else {
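+ /* Otherwise bit 62 reports an active NS NMI, bit 63 a Secure NMI */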
+ if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+ prio |= ICC_RPR_EL1_NSNMI;
+ }
+ if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+ prio |= ICC_RPR_EL1_NMI;
+ }
+ }
+ }
+
trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
return prio;
}
@@ -2482,6 +2597,15 @@ static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
},
};
+static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
+ { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_R, .accessfn = gicv3_irq_access,
+ .readfn = icc_nmiar1_read,
+ },
+};
+
static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
@@ -2838,6 +2962,19 @@ void gicv3_init_cpuif(GICv3State *s)
*/
define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
+ /*
+ * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
+ * implement FEAT_GICv3_NMI, which is the CPU interface part
+ * of NMI support. This is distinct from whether the GIC proper
+ * (redistributors and distributor) has NMI support. In QEMU
+ * that is a property of the GIC device in s->nmi_support;
+ * cs->nmi_support indicates the CPU interface's support.
+ */
+ if (cpu_isar_feature(aa64_nmi, cpu)) {
+ cs->nmi_support = true;
+ define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
+ }
+
/*
* The CPU implementation specifies the number of supported
* bits of physical priority. For backwards compatibility
@@ -116,6 +116,7 @@ gicv3_cpuif_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f
gicv3_icc_generate_sgi(uint32_t cpuid, int irq, int irm, uint32_t aff, uint32_t targetlist) "GICv3 CPU i/f 0x%x generating SGI %d IRM %d target affinity 0x%xxx targetlist 0x%x"
gicv3_icc_iar0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR0 read cpu 0x%x value 0x%" PRIx64
gicv3_icc_iar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR1 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_nmiar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_NMIAR1 read cpu 0x%x value 0x%" PRIx64
gicv3_icc_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR%d write cpu 0x%x value 0x%" PRIx64
gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu 0x%x value 0x%" PRIx64
gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu 0x%x value 0x%" PRIx64