@@ -1294,7 +1294,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
void arm_log_exception(int idx);
@@ -44,7 +44,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
bool s1_is_el0,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr,
- ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+ __attribute__((nonnull));
#endif
static void switch_mode(CPUARMState *env, int mode);
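Both declarations of the page-table walkers (the public get_phys_addr prototype, presumably in target/arm/internals.h, and the static get_phys_addr_lpae prototype in target/arm/helper.c) gain __attribute__((nonnull)). With an empty argument list the attribute covers every pointer parameter, so any caller still passing a literal NULL is flagged at compile time. A minimal standalone sketch of the behavior, with hypothetical names:

#include <stddef.h>

/* An empty nonnull list marks all pointer parameters as non-NULL. */
void fill(int *dst, const int *src) __attribute__((nonnull));

void fill(int *dst, const int *src)
{
    *dst = *src;            /* compiler may assume dst and src != NULL */
}

int main(void)
{
    int a = 0, b = 1;
    fill(&a, &b);           /* fine */
    /* fill(&a, NULL); */   /* gcc/clang warn under -Wnonnull (in -Wall) */
    return 0;
}

Note that the attribute also licenses the compiler to delete explicit NULL checks inside the callee, which is why the patch removes them rather than leaving them as dead code.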
@@ -11101,19 +11102,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
arm_tlb_bti_gp(txattrs) = true;
}
- if (cacheattrs != NULL) {
- if (mmu_idx == ARMMMUIdx_Stage2) {
- cacheattrs->attrs = convert_stage2_attrs(env,
- extract32(attrs, 0, 4));
- } else {
- /* Index into MAIR registers for cache attributes */
- uint8_t attrindx = extract32(attrs, 0, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
- assert(attrindx <= 7);
- cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
- }
- cacheattrs->shareability = extract32(attrs, 6, 2);
+ if (mmu_idx == ARMMMUIdx_Stage2) {
+ cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
}
+ cacheattrs->shareability = extract32(attrs, 6, 2);
*phys_ptr = descaddr;
*page_size_ptr = page_size;
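With the NULL check gone, the tail of get_phys_addr_lpae fills *cacheattrs unconditionally. For stage 2 the low four descriptor bits are converted directly (stage-2 descriptors carry the memory attributes themselves), while for stage 1 the 3-bit AttrIndx field selects one of the eight 8-bit attribute fields packed into MAIR_ELx. A sketch of what the extract64() call computes, using a hypothetical helper name:

#include <stdint.h>

/* Equivalent of extract64(mair, attrindx * 8, 8): MAIR_ELx packs
 * Attr0..Attr7 as consecutive bytes, so AttrIndx selects byte attrindx. */
static inline uint8_t mair_attr(uint64_t mair, unsigned attrindx)
{
    return (uint8_t)(mair >> (attrindx * 8));
}

The assert(attrindx <= 7) holds by construction, since extract32(attrs, 0, 3) can only yield 0..7.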
@@ -11948,28 +11946,29 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_Stage2,
mmu_idx == ARMMMUIdx_E10_0,
phys_ptr, attrs, &s2_prot,
- page_size, fi,
- cacheattrs != NULL ? &cacheattrs2 : NULL);
+ page_size, fi, &cacheattrs2);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
*prot &= s2_prot;
- /* Combine the S1 and S2 cache attributes, if needed */
- if (!ret && cacheattrs != NULL) {
- if (env->cp15.hcr_el2 & HCR_DC) {
- /*
- * HCR.DC forces the first stage attributes to
- * Normal Non-Shareable,
- * Inner Write-Back Read-Allocate Write-Allocate,
- * Outer Write-Back Read-Allocate Write-Allocate.
- */
- cacheattrs->attrs = 0xff;
- cacheattrs->shareability = 0;
- }
- *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ /* If S2 fails, return early. */
+ if (ret) {
+ return ret;
}
- return ret;
+ /* Combine the S1 and S2 cache attributes. */
+ if (env->cp15.hcr_el2 & HCR_DC) {
+ /*
+ * HCR.DC forces the first stage attributes to
+ * Normal Non-Shareable,
+ * Inner Write-Back Read-Allocate Write-Allocate,
+ * Outer Write-Back Read-Allocate Write-Allocate.
+ */
+ cacheattrs->attrs = 0xff;
+ cacheattrs->shareability = 0;
+ }
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ return 0;
} else {
/*
* For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
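Two changes land in this get_phys_addr hunk for the combined stage1+stage2 case: the stage-2 walk now always receives &cacheattrs2, and a stage-2 fault returns early, so the attribute-combining code runs unconditionally instead of being nested in a success check. The 0xff written under HCR.DC is the MAIR byte encoding the attributes named in the comment; a sketch of the decoding, assuming the standard MAIR nibble layout:

#include <stdint.h>

/* A MAIR attribute byte splits into outer (high) and inner (low) nibbles.
 * 0xf in either nibble means Normal, Write-Back Non-transient,
 * Read-Allocate, Write-Allocate -- so 0xff is what HCR.DC forces. */
static inline void split_mair_attr(uint8_t attr, uint8_t *outer, uint8_t *inner)
{
    *outer = attr >> 4;
    *inner = attr & 0xf;
}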
@@ -12094,11 +12093,12 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
bool ret;
ARMMMUFaultInfo fi = {};
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ ARMCacheAttrs cacheattrs = {};
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fi, NULL);
+ attrs, &prot, &page_size, &fi, &cacheattrs);
if (ret) {
return -1;
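This hunk and the remaining ones convert the call sites that used to pass NULL: arm_cpu_get_phys_page_attrs_debug here, the v7M stack and instruction-fetch helpers (presumably target/arm/m_helper.c), and arm_cpu_tlb_fill (presumably target/arm/tlb_helper.c). They all follow the same pattern: a zero-initialized ARMCacheAttrs on the stack absorbs the output and is then ignored. A self-contained sketch of the pattern, with hypothetical names:

#include <stdint.h>

typedef struct { uint8_t attrs; unsigned shareability; } CacheAttrsSketch;

int translate(CacheAttrsSketch *out) __attribute__((nonnull));

int translate(CacheAttrsSketch *out)
{
    out->attrs = 0;
    out->shareability = 0;
    return 0;
}

int caller_that_ignores_attrs(void)
{
    CacheAttrsSketch scratch = {0};  /* throwaway replaces the old NULL */
    return translate(&scratch);      /* result filled, then discarded */
}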
@@ -187,12 +187,13 @@ static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
hwaddr physaddr;
int prot;
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
int exc;
bool exc_secure;
if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &physaddr,
- &attrs, &prot, &page_size, &fi, NULL)) {
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
if (mode == STACK_LAZYFP) {
@@ -279,13 +280,14 @@ static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
hwaddr physaddr;
int prot;
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
bool secure = mmu_idx & ARM_MMU_IDX_M_S;
int exc;
bool exc_secure;
uint32_t value;
if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &physaddr,
- &attrs, &prot, &page_size, &fi, NULL)) {
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
/* MPU/SAU lookup failed */
if (fi.type == ARMFault_QEMU_SFault) {
qemu_log_mask(CPU_LOG_INT,
@@ -1928,6 +1930,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
V8M_SAttributes sattrs = {};
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
MemTxResult txres;
target_ulong page_size;
hwaddr physaddr;
@@ -1945,8 +1948,8 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
"...really SecureFault with SFSR.INVEP\n");
return false;
}
- if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
- &physaddr, &attrs, &prot, &page_size, &fi, NULL)) {
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &physaddr,
+ &attrs, &prot, &page_size, &fi, &cacheattrs)) {
/* the MPU lookup failed */
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -166,6 +166,7 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
int prot, ret;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
/*
* Walk the page table and (if the mapping exists) add the page
@@ -175,7 +176,8 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
*/
ret = get_phys_addr(&cpu->env, address, access_type,
core_to_arm_mmu_idx(&cpu->env, mmu_idx),
- &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ &phys_addr, &attrs, &prot, &page_size,
+ &fi, &cacheattrs);
if (likely(!ret)) {
/*
* Map a single [sub]page. Regions smaller than our declared
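With every caller converted, the runtime "cacheattrs may be NULL" contract becomes a compile-time one: any future call site passing NULL trips -Wnonnull at build time, and the walkers themselves are simpler for never having to test the pointer.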