@@ -272,6 +272,17 @@ void cpu_loop (CPUSPARCState *env)
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
}
break;
+ case TT_UNALIGNED:
+ info.si_signo = TARGET_SIGBUS;
+ info.si_errno = 0;
+ info.si_code = TARGET_BUS_ADRALN;
+#ifdef TARGET_SPARC64
+ info._sifields._sigfault._addr = env->dmmu.sfar;
+#else
+ info._sifields._sigfault._addr = env->mmuregs[4];
+#endif
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
case EXCP_DEBUG:
info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
@@ -865,11 +865,11 @@ static const struct TCGCPUOps sparc_tcg_ops = {
.synchronize_from_tb = sparc_cpu_synchronize_from_tb,
.cpu_exec_interrupt = sparc_cpu_exec_interrupt,
.tlb_fill = sparc_cpu_tlb_fill,
+ .do_unaligned_access = sparc_cpu_do_unaligned_access,

#ifndef CONFIG_USER_ONLY
.do_interrupt = sparc_cpu_do_interrupt,
.do_transaction_failed = sparc_cpu_do_transaction_failed,
- .do_unaligned_access = sparc_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
#endif /* CONFIG_TCG */
@@ -946,6 +946,7 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
}
return phys_addr;
}
+#endif /* CONFIG_USER_ONLY */

void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
@@ -956,7 +957,9 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
CPUSPARCState *env = &cpu->env;

#ifdef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type);
+#endif
env->dmmu.sfar = addr;
#else
env->mmuregs[4] = addr;
@@ -964,4 +967,3 @@ void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
}

-#endif /* CONFIG_USER_ONLY */