@@ -374,8 +374,9 @@ static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
    return MMU_KERNEL_IDX;
}
-int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
-                            int mmu_idx);
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                     MMUAccessType access_type, int mmu_idx,
+                     bool probe, uintptr_t retaddr);
#include "exec/cpu-all.h"
@@ -304,9 +304,8 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
    cc->set_pc = mb_cpu_set_pc;
    cc->gdb_read_register = mb_cpu_gdb_read_register;
    cc->gdb_write_register = mb_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
-    cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
-#else
+    cc->tlb_fill = mb_cpu_tlb_fill;
+#ifndef CONFIG_USER_ONLY
    cc->do_transaction_failed = mb_cpu_transaction_failed;
    cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
#endif
@@ -38,73 +38,80 @@ void mb_cpu_do_interrupt(CPUState *cs)
    env->regs[14] = env->sregs[SR_PC];
}
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
-                            int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                     MMUAccessType access_type, int mmu_idx,
+                     bool probe, uintptr_t retaddr)
{
    cs->exception_index = 0xaa;
-    cpu_dump_state(cs, stderr, 0);
-    return 1;
+    cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */
-int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
-                            int mmu_idx)
+bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                     MMUAccessType access_type, int mmu_idx,
+                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
+    struct microblaze_mmu_lookup lu;
    unsigned int hit;
-    int r = 1;
    int prot;
-    /* Translate if the MMU is available and enabled. */
-    if (mmu_idx != MMU_NOMMU_IDX) {
-        uint32_t vaddr, paddr;
-        struct microblaze_mmu_lookup lu;
-
-        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
-        if (hit) {
-            vaddr = address & TARGET_PAGE_MASK;
-            paddr = lu.paddr + vaddr - lu.vaddr;
-
-            qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
-                          mmu_idx, vaddr, paddr, lu.prot);
-            tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
-            r = 0;
-        } else {
-            env->sregs[SR_EAR] = address;
-            qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
-                          mmu_idx, address);
-
-            switch (lu.err) {
-            case ERR_PROT:
-                env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
-                env->sregs[SR_ESR] |= (rw == 1) << 10;
-                break;
-            case ERR_MISS:
-                env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
-                env->sregs[SR_ESR] |= (rw == 1) << 10;
-                break;
-            default:
-                abort();
-                break;
-            }
-
-            if (cs->exception_index == EXCP_MMU) {
-                cpu_abort(cs, "recursive faults\n");
-            }
-
-            /* TLB miss. */
-            cs->exception_index = EXCP_MMU;
-        }
-    } else {
+    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
-        r = 0;
+        return true;
    }
-    return r;
+
+    hit = mmu_translate(&env->mmu, &lu, address, access_type, mmu_idx);
+    if (likely(hit)) {
+        uint32_t vaddr = address & TARGET_PAGE_MASK;
+        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;
+
+        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
+                      mmu_idx, vaddr, paddr, lu.prot);
+        tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+        return true;
+    }
+
+    /* TLB miss. */
+    if (probe) {
+        return false;
+    }
+
+    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
+                  mmu_idx, address);
+
+    env->sregs[SR_EAR] = address;
+    switch (lu.err) {
+    case ERR_PROT:
+        env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 17 : 16;
+        env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+        break;
+    case ERR_MISS:
+        env->sregs[SR_ESR] = access_type == MMU_INST_FETCH ? 19 : 18;
+        env->sregs[SR_ESR] |= (access_type == MMU_DATA_STORE) << 10;
+        break;
+    default:
+        abort();
+    }
+
+    if (cs->exception_index == EXCP_MMU) {
+        cpu_abort(cs, "recursive faults\n");
+    }
+
+    /* TLB miss. */
+    cs->exception_index = EXCP_MMU;
+    cpu_loop_exit_restore(cs, retaddr);
+}
+
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+    mb_cpu_tlb_fill(cs, addr, size, access_type, mmu_idx, false, retaddr);
}
void mb_cpu_do_interrupt(CPUState *cs)
@@ -28,25 +28,6 @@
#define D(x)
-#if !defined(CONFIG_USER_ONLY)
-
-/* Try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
-              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
-    int ret;
-
-    ret = mb_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
-    if (unlikely(ret)) {
-        /* now we have a real cpu fault */
-        cpu_loop_exit_restore(cs, retaddr);
-    }
-}
-#endif
-
void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
{
    int test = ctrl & STREAM_TEST;