@@ -958,12 +958,14 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
* @mmu_idx: MMU index indicating required translation regime
* @is_aa64: TRUE if AArch64
* @ap: The 2-bit simple AP (AP[2:1])
- * @ns: NS (non-secure) bit
* @xn: XN (execute-never) bit
* @pxn: PXN (privileged execute-never) bit
+ * @in_pa: The original input pa space
+ * @out_pa: The output pa space, modified by NSTable, NS, and NSE
*/
static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
- int ap, int ns, int xn, int pxn)
+ int ap, int xn, int pxn,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
bool is_user = regime_is_user(env, mmu_idx);
int prot_rw, user_rw;
@@ -984,7 +986,8 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
}
}
- if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
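+ /*
+ * SCR_EL3.SIF forbids instruction fetch from non-secure memory while
+ * in secure state; returning prot_rw here denies execute permission.
+ */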
+ if (out_pa == ARMSS_NonSecure && in_pa == ARMSS_Secure &&
+ (env->cp15.scr_el3 & SCR_SIF)) {
return prot_rw;
}
@@ -1252,11 +1255,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
int32_t stride;
int addrsize, inputsize, outputsize;
uint64_t tcr = regime_tcr(env, mmu_idx);
- int ap, ns, xn, pxn;
+ int ap, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
uint64_t descaddrmask;
bool aarch64 = arm_el_is_aa64(env, el);
uint64_t descriptor, new_descriptor;
+ ARMSecuritySpace out_space;
/* TODO: This code does not support shareability levels. */
if (aarch64) {
@@ -1439,8 +1443,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
ptw->in_ptw_idx += 1;
ptw->in_secure = false;
ptw->in_space = ARMSS_NonSecure;
- result->f.attrs.secure = false;
- result->f.attrs.space = ARMSS_NonSecure;
}
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
@@ -1558,15 +1560,75 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
}
ap = extract32(attrs, 6, 2);
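+ /*
+ * Start from the walk's current security space; for stage 1 this
+ * already reflects any NSTable downgrade to non-secure.
+ */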
+ out_space = ptw->in_space;
if (regime_is_stage2(mmu_idx)) {
- ns = mmu_idx == ARMMMUIdx_Stage2;
+ /*
+ * R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
+ * The bit remains ignored for other security states.
+ */
+ if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
+ out_space = ARMSS_NonSecure;
+ }
xn = extract64(attrs, 53, 2);
result->f.prot = get_S2prot(env, ap, xn, s1_is_el0);
} else {
- ns = extract32(attrs, 5, 1);
+ int nse, ns = extract32(attrs, 5, 1);
+ switch (out_space) {
+ case ARMSS_Root:
+ /*
+ * R_GVZML: Bit 11 becomes the NSE field in the EL3 regime.
+ * R_XTYPW: NSE and NS together select the output pa space.
+ */
+ nse = extract32(attrs, 11, 1);
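+ /*
+ * The ARMSS_* values are arranged so that the {NSE,NS} pair maps
+ * directly onto the space: Secure=0, NonSecure=1, Root=2, Realm=3.
+ */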
+ out_space = (nse << 1) | ns;
+ if (out_space == ARMSS_Secure &&
+ !cpu_isar_feature(aa64_sel2, cpu)) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ case ARMSS_Secure:
+ if (ns) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ case ARMSS_Realm:
+ switch (mmu_idx) {
+ case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E1:
+ case ARMMMUIdx_Stage1_E1_PAN:
+ /* I_CZPRF: For Realm EL1&0 stage1, NS bit is RES0. */
+ break;
+ case ARMMMUIdx_E2:
+ case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ /*
+ * R_LYKFZ, R_WGRZN: For Realm EL2 and EL2&1,
+ * NS changes the output to non-secure space.
+ */
+ if (ns) {
+ out_space = ARMSS_NonSecure;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case ARMSS_NonSecure:
+ /* R_QRMFF: For NonSecure state, the NS bit is RES0. */
+ break;
+ default:
+ g_assert_not_reached();
+ }
xn = extract64(attrs, 54, 1);
pxn = extract64(attrs, 53, 1);
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+
+ /*
+ * Note that we modified ptw->in_space earlier for NSTable, but
+ * result->f.attrs retains a copy of the original security space.
+ */
+ result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
+ result->f.attrs.space, out_space);
}
if (!(result->f.prot & (1 << access_type))) {
@@ -1593,15 +1655,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
}
}
- if (ns) {
- /*
- * The NS bit will (as required by the architecture) have no effect if
- * the CPU doesn't support TZ or this is a non-secure translation
- * regime, because the attribute will already be non-secure.
- */
- result->f.attrs.secure = false;
- result->f.attrs.space = ARMSS_NonSecure;
- }
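+ /*
+ * Commit the resolved output space; the legacy secure flag is derived
+ * from it so the two cannot diverge.
+ */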
+ result->f.attrs.space = out_space;
+ result->f.attrs.secure = arm_space_is_secure(out_space);
/* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {