@@ -16,10 +16,8 @@
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool s1_is_el0, hwaddr *phys_ptr,
- MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ bool s1_is_el0, GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
__attribute__((nonnull));
/* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
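The definition of GetPhysAddrResult is not part of this hunk, but the field accesses later in the patch imply a layout along these lines (a sketch inferred from usage, not the authoritative definition):

    typedef struct GetPhysAddrResult {
        hwaddr phys;              /* output physical address */
        target_ulong page_size;   /* size of the page containing phys */
        int prot;                 /* PAGE_READ/PAGE_WRITE/PAGE_EXEC bits */
        MemTxAttrs attrs;         /* memory transaction attributes */
        ARMCacheAttrs cacheattrs; /* cacheability/shareability attributes */
    } GetPhysAddrResult;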
@@ -204,18 +202,13 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
{
if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
!regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
- target_ulong s2size;
- hwaddr s2pa;
- int s2prot;
- int ret;
ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
: ARMMMUIdx_Stage2;
- ARMCacheAttrs cacheattrs = {};
- MemTxAttrs txattrs = {};
+ GetPhysAddrResult s2 = {};
+ int ret;
ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
- &s2pa, &txattrs, &s2prot, &s2size, fi,
- &cacheattrs);
+ &s2, fi);
if (ret) {
assert(fi->type != ARMFault_None);
fi->s2addr = addr;
@@ -225,7 +218,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
return ~0;
}
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
- ptw_attrs_are_device(env, cacheattrs)) {
+ ptw_attrs_are_device(env, s2.cacheattrs)) {
/*
* PTW set and S1 walk touched S2 Device memory:
* generate Permission fault.
@@ -249,7 +242,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
assert(!*is_secure);
}
- addr = s2pa;
+ addr = s2.phys;
}
return addr;
}
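S1_ptw_translate() reports failure by returning ~0 with *fi already populated (the assert above guarantees fi->type != ARMFault_None on that path), so a caller in the descriptor-walk loop can test either the sentinel or the fault type. An illustrative sketch of the calling pattern, not code taken from this patch:

    descaddr = S1_ptw_translate(env, mmu_idx, descaddr, &is_secure, fi);
    if (fi->type != ARMFault_None) {
        goto do_fault;   /* fault info was filled in by the S2 walk */
    }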
@@ -972,19 +965,13 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
* table walk), must be true if this is stage 2 of a stage 1+2
* walk for an EL0 access. If @mmu_idx is anything else,
* @s1_is_el0 is ignored.
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @result: set to the translation result on success
* @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool s1_is_el0, hwaddr *phys_ptr,
- MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ bool s1_is_el0, GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = env_archcpu(env);
/* Read an LPAE long-descriptor translation table. */
@@ -1302,16 +1289,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
ns = mmu_idx == ARMMMUIdx_Stage2;
xn = extract32(attrs, 11, 2);
- *prot = get_S2prot(env, ap, xn, s1_is_el0);
+ result->prot = get_S2prot(env, ap, xn, s1_is_el0);
} else {
ns = extract32(attrs, 3, 1);
xn = extract32(attrs, 12, 1);
pxn = extract32(attrs, 11, 1);
- *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+ result->prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
}
fault_type = ARMFault_Permission;
- if (!(*prot & (1 << access_type))) {
+ if (!(result->prot & (1 << access_type))) {
goto do_fault;
}
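The one-bit permission probe above works because the MMUAccessType values line up with the prot bits (values from QEMU's headers):

    /* MMU_DATA_LOAD  = 0  ->  1 << 0 = PAGE_READ  (0x1)
     * MMU_DATA_STORE = 1  ->  1 << 1 = PAGE_WRITE (0x2)
     * MMU_INST_FETCH = 2  ->  1 << 2 = PAGE_EXEC  (0x4)
     * so !(result->prot & (1 << access_type)) tests exactly the
     * permission the current access needs. */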
@@ -1321,23 +1308,23 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
* the CPU doesn't support TZ or this is a non-secure translation
* regime, because the attribute will already be non-secure.
*/
- txattrs->secure = false;
+ result->attrs.secure = false;
}
/* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
- arm_tlb_bti_gp(txattrs) = true;
+ arm_tlb_bti_gp(&result->attrs) = true;
}
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- cacheattrs->is_s2_format = true;
- cacheattrs->attrs = extract32(attrs, 0, 4);
+ result->cacheattrs.is_s2_format = true;
+ result->cacheattrs.attrs = extract32(attrs, 0, 4);
} else {
/* Index into MAIR registers for cache attributes */
uint8_t attrindx = extract32(attrs, 0, 3);
uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
assert(attrindx <= 7);
- cacheattrs->is_s2_format = false;
- cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ result->cacheattrs.is_s2_format = false;
+ result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
}
/*
@@ -1346,13 +1333,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
* that case comes from TCR_ELx, which we extracted earlier.
*/
if (param.ds) {
- cacheattrs->shareability = param.sh;
+ result->cacheattrs.shareability = param.sh;
} else {
- cacheattrs->shareability = extract32(attrs, 6, 2);
+ result->cacheattrs.shareability = extract32(attrs, 6, 2);
}
- *phys_ptr = descaddr;
- *page_size_ptr = page_size;
+ result->phys = descaddr;
+ result->page_size = page_size;
return false;
do_fault:
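On success, get_phys_addr_lpae() now returns false with *result filled in; on a fault it returns true with *fi populated. A typical consumer hands the struct straight to the softmmu TLB, roughly as below (a sketch assuming the usual tlb_set_page_with_attrs() flow and a CPUState *cs in scope; not code from this patch):

    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};

    if (!get_phys_addr_lpae(env, addr, access_type, mmu_idx, false,
                            &res, &fi)) {
        /* Success: the struct carries everything the TLB entry needs. */
        tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                                res.phys & TARGET_PAGE_MASK, res.attrs,
                                res.prot, arm_to_core_mmu_idx(mmu_idx),
                                res.page_size);
    }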
@@ -2354,10 +2341,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
cacheattrs1 = result->cacheattrs;
memset(result, 0, sizeof(*result));
- ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
- &result->phys, &result->attrs,
- &result->prot, &result->page_size,
- fi, &result->cacheattrs);
+ ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
+ is_el0, result, fi);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
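The memset-and-reuse above is why cacheattrs1 is saved first: *result is recycled for the stage-2 walk, and the stage-1 attributes are merged back in shortly after this hunk. Roughly (a sketch; combine_cacheattrs() is the existing merge helper, but the exact surrounding code is not shown here):

    /* cacheattrs1 holds the S1 attributes saved before the memset;
     * result->cacheattrs now holds what the S2 walk produced. */
    result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
                                            result->cacheattrs);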
@@ -2528,9 +2513,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
- &result->phys, &result->attrs,
- &result->prot, &result->page_size,
- fi, &result->cacheattrs);
+ result, fi);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, address, access_type, mmu_idx,
&result->phys, &result->attrs,