@@ -34,6 +34,16 @@ void cpu_list_lock(void);
void cpu_list_unlock(void);
unsigned int cpu_list_generation_id_get(void);
+/**
+ * cpu_mmu_index:
+ * @env: The cpu environment
+ * @ifetch: True for code access, false for data access.
+ *
+ * Return the core mmu index for the current translation regime.
+ * This function is used by generic TCG code paths.
+ */
+int cpu_mmu_index(CPUArchState *env, bool ifetch);
+
void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);
@@ -389,15 +389,6 @@ enum {
#define TB_FLAG_UNALIGN (1u << 1)
-static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
-{
- int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
- if (env->flags & ENV_FLAG_PAL_MODE) {
- ret = MMU_KERNEL_IDX;
- }
- return ret;
-}
-
enum {
IR_V0 = 0,
IR_T0 = 1,
@@ -3268,19 +3268,6 @@ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
#define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH)
#define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH)
-/**
- * cpu_mmu_index:
- * @env: The cpu environment
- * @ifetch: True for code access, false for data access.
- *
- * Return the core mmu index for the current translation regime.
- * This function is used by generic TCG code paths.
- */
-static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
-{
- return EX_TBFLAG_ANY(env->hflags, MMUIDX);
-}
-
/**
* sve_vq
* @env: the cpu context
@@ -184,13 +184,6 @@ static inline void set_avr_feature(CPUAVRState *env, int feature)
env->features |= (1U << feature);
}
-#define cpu_mmu_index avr_cpu_mmu_index
-
-static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch)
-{
- return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
-}
-
void avr_cpu_tcg_init(void);
int cpu_avr_exec(CPUState *cpu);
@@ -260,10 +260,6 @@ enum {
/* MMU modes definitions */
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
-{
- return !!(env->pregs[PR_CCS] & U_FLAG);
-}
/* Support function regs. */
#define SFR_RW_GC_CFG 0][0
@@ -146,15 +146,6 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, vaddr *pc,
*flags = hex_flags;
}
-static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
-{
-#ifdef CONFIG_USER_ONLY
- return MMU_USER_IDX;
-#else
-#error System mode not supported on Hexagon yet
-#endif
-}
-
typedef HexagonCPU ArchCPU;
void hexagon_translate_init(void);
@@ -281,19 +281,6 @@ static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE;
}
-static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
-{
-#ifdef CONFIG_USER_ONLY
- return MMU_USER_IDX;
-#else
- if (env->psw & (ifetch ? PSW_C : PSW_D)) {
- return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
- }
- /* mmu disabled */
- return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
-#endif
-}
-
void hppa_translate_init(void);
#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
@@ -2296,13 +2296,6 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define MMU_NESTED_IDX 3
#define MMU_PHYS_IDX 4
-static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
-{
- return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
- (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
- ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
-}
-
static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
@@ -408,18 +408,6 @@ struct LoongArchCPUClass {
#define MMU_IDX_USER MMU_PLV_USER
#define MMU_IDX_DA 4
-static inline int cpu_mmu_index(CPULoongArchState *env, bool ifetch)
-{
-#ifdef CONFIG_USER_ONLY
- return MMU_IDX_USER;
-#else
- if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
- return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
- }
- return MMU_IDX_DA;
-#endif
-}
-
static inline bool is_la64(CPULoongArchState *env)
{
return FIELD_EX32(env->cpucfg[1], CPUCFG1, ARCH) == CPUCFG1_ARCH_LA64;
@@ -577,10 +577,6 @@ enum {
/* MMU modes definitions */
#define MMU_KERNEL_IDX 0
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
-{
- return (env->sr & SR_S) == 0 ? 1 : 0;
-}
bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
MMUAccessType access_type, int mmu_idx,
@@ -434,21 +434,6 @@ void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
MemTxResult response, uintptr_t retaddr);
#endif
-static inline int cpu_mmu_index(CPUMBState *env, bool ifetch)
-{
- MicroBlazeCPU *cpu = env_archcpu(env);
-
- /* Are we in nommu mode?. */
- if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) {
- return MMU_NOMMU_IDX;
- }
-
- if (env->msr & MSR_UM) {
- return MMU_USER_IDX;
- }
- return MMU_KERNEL_IDX;
-}
-
#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_mb_cpu;
#endif
@@ -1253,11 +1253,6 @@ static inline int hflags_mmu_index(uint32_t hflags)
}
}
-static inline int cpu_mmu_index(CPUMIPSState *env, bool ifetch)
-{
- return hflags_mmu_index(env->hflags);
-}
-
#include "exec/cpu-all.h"
/* Exceptions */
@@ -270,12 +270,6 @@ void do_nios2_semihosting(CPUNios2State *env);
#define MMU_SUPERVISOR_IDX 0
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch)
-{
- return (env->ctrl[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX :
- MMU_SUPERVISOR_IDX;
-}
-
#ifndef CONFIG_USER_ONLY
hwaddr nios2_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
@@ -361,18 +361,6 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env, vaddr *pc,
| (env->sr & (SR_SM | SR_DME | SR_IME | SR_OVE));
}
-static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
-{
- int ret = MMU_NOMMU_IDX; /* mmu is disabled */
-
- if (env->sr & (ifetch ? SR_IME : SR_DME)) {
- /* The mmu is enabled; test supervisor state. */
- ret = env->sr & SR_SM ? MMU_SUPERVISOR_IDX : MMU_USER_IDX;
- }
-
- return ret;
-}
-
static inline uint32_t cpu_get_sr(const CPUOpenRISCState *env)
{
return (env->sr
@@ -1624,14 +1624,6 @@ int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
/* MMU modes definitions */
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch)
-{
-#ifdef CONFIG_USER_ONLY
- return MMU_USER_IDX;
-#else
- return (env->hflags >> (ifetch ? HFLAGS_IMMU_IDX : HFLAGS_DMMU_IDX)) & 7;
-#endif
-}
/* Compatibility modes */
#if defined(TARGET_PPC64)
@@ -498,7 +498,6 @@ target_ulong riscv_cpu_get_geilen(CPURISCVState *env);
void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen);
bool riscv_cpu_vector_enabled(CPURISCVState *env);
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
-int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
G_NORETURN void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr);
@@ -507,8 +506,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
bool probe, uintptr_t retaddr);
char *riscv_isa_string(RISCVCPU *cpu);
-#define cpu_mmu_index riscv_cpu_mmu_index
-
#ifndef CONFIG_USER_ONLY
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
vaddr addr, unsigned size,
@@ -158,11 +158,6 @@ static inline void cpu_get_tb_cpu_state(CPURXState *env, vaddr *pc,
*flags = FIELD_DP32(*flags, PSW, U, env->psw_u);
}
-static inline int cpu_mmu_index(CPURXState *env, bool ifetch)
-{
- return 0;
-}
-
static inline uint32_t rx_cpu_pack_psw(CPURXState *env)
{
uint32_t psw = 0;
@@ -381,37 +381,6 @@ extern const VMStateDescription vmstate_s390_cpu;
#define MMU_HOME_IDX 2
#define MMU_REAL_IDX 3
-static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
-{
-#ifdef CONFIG_USER_ONLY
- return MMU_USER_IDX;
-#else
- if (!(env->psw.mask & PSW_MASK_DAT)) {
- return MMU_REAL_IDX;
- }
-
- if (ifetch) {
- if ((env->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) {
- return MMU_HOME_IDX;
- }
- return MMU_PRIMARY_IDX;
- }
-
- switch (env->psw.mask & PSW_MASK_ASC) {
- case PSW_ASC_PRIMARY:
- return MMU_PRIMARY_IDX;
- case PSW_ASC_SECONDARY:
- return MMU_SECONDARY_IDX;
- case PSW_ASC_HOME:
- return MMU_HOME_IDX;
- case PSW_ASC_ACCREG:
- /* Fallthrough: access register mode is not yet supported */
- default:
- abort();
- }
-#endif
-}
-
#ifdef CONFIG_TCG
#include "tcg/tcg_s390x.h"
@@ -273,16 +273,6 @@ void cpu_load_tlb(CPUSH4State * env);
/* MMU modes definitions */
#define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
-{
- /* The instruction in a RTE delay slot is fetched in privileged
- mode, but executed in user mode. */
- if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) {
- return 0;
- } else {
- return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
- }
-}
#include "exec/cpu-all.h"
@@ -708,34 +708,6 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
}
#endif
-static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
-{
-#if defined(CONFIG_USER_ONLY)
- return MMU_USER_IDX;
-#elif !defined(TARGET_SPARC64)
- if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
- return MMU_PHYS_IDX;
- } else {
- return env->psrs;
- }
-#else
- /* IMMU or DMMU disabled. */
- if (ifetch
- ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
- : (env->lsu & DMMU_E) == 0) {
- return MMU_PHYS_IDX;
- } else if (cpu_hypervisor_mode(env)) {
- return MMU_PHYS_IDX;
- } else if (env->tl > 0) {
- return MMU_NUCLEUS_IDX;
- } else if (cpu_supervisor_mode(env)) {
- return MMU_KERNEL_IDX;
- } else {
- return MMU_USER_IDX;
- }
-#endif
-}
-
static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
{
#if !defined (TARGET_SPARC64)
@@ -246,11 +246,6 @@ void fpu_set_state(CPUTriCoreState *env);
#define MMU_USER_IDX 2
-static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
-{
- return 0;
-}
-
#include "exec/cpu-all.h"
FIELD(TB_FLAGS, PRIV, 0, 2)
@@ -713,11 +713,6 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
/* MMU modes definitions */
#define MMU_USER_IDX 3
-static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
-{
- return xtensa_get_cring(env);
-}
-
#define XTENSA_TBFLAG_RING_MASK 0x3
#define XTENSA_TBFLAG_EXCM 0x4
#define XTENSA_TBFLAG_LITBASE 0x8
@@ -25,6 +25,14 @@
#include "cpu.h"
#include "exec/exec-all.h"
+int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
+{
+ int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
+ if (env->flags & ENV_FLAG_PAL_MODE) {
+ ret = MMU_KERNEL_IDX;
+ }
+ return ret;
+}
static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -49,6 +49,11 @@
#include "fpu/softfloat.h"
#include "cpregs.h"
+int cpu_mmu_index(CPUARMState *env, bool ifetch)
+{
+ return EX_TBFLAG_ANY(env->hflags, MMUIDX);
+}
+
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -27,6 +27,11 @@
#include "tcg/debug-assert.h"
#include "hw/qdev-properties.h"
+int cpu_mmu_index(CPUAVRState *env, bool ifetch)
+{
+ return ifetch ? MMU_CODE_IDX : MMU_DATA_IDX;
+}
+
static void avr_cpu_set_pc(CPUState *cs, vaddr value)
{
AVRCPU *cpu = AVR_CPU(cs);
@@ -27,6 +27,10 @@
#include "cpu.h"
#include "mmu.h"
+int cpu_mmu_index(CPUCRISState *env, bool ifetch)
+{
+ return !!(env->pregs[PR_CCS] & U_FLAG);
+}
static void cris_cpu_set_pc(CPUState *cs, vaddr value)
{
@@ -26,6 +26,15 @@
#include "tcg/tcg.h"
#include "exec/gdbstub.h"
+int cpu_mmu_index(CPUHexagonState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+#error System mode not supported on Hexagon yet
+#endif
+}
+
static void hexagon_v67_cpu_init(Object *obj) { }
static void hexagon_v68_cpu_init(Object *obj) { }
static void hexagon_v69_cpu_init(Object *obj) { }
@@ -28,6 +28,19 @@
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
+int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ if (env->psw & (ifetch ? PSW_C : PSW_D)) {
+ return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
+ }
+ /* mmu disabled */
+ return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
+#endif
+}
+
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
HPPACPU *cpu = HPPA_CPU(cs);
@@ -46,6 +46,13 @@
#include "disas/capstone.h"
#include "cpu-internal.h"
+int cpu_mmu_index(CPUX86State *env, bool ifetch)
+{
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
static void x86_cpu_realizefn(DeviceState *dev, Error **errp);
/* Helpers for building CPUID[2] descriptors: */
@@ -31,6 +31,18 @@
#include "tcg/tcg.h"
#endif
+int cpu_mmu_index(CPULoongArchState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_IDX_USER;
+#else
+ if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
+ return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
+ }
+ return MMU_IDX_DA;
+#endif
+}
+
const char * const regnames[32] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
@@ -24,6 +24,11 @@
#include "migration/vmstate.h"
#include "fpu/softfloat.h"
+int cpu_mmu_index(CPUM68KState *env, bool ifetch)
+{
+ return (env->sr & SR_S) == 0 ? 1 : 0;
+}
+
static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
{
M68kCPU *cpu = M68K_CPU(cs);
@@ -32,6 +32,21 @@
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
+int cpu_mmu_index(CPUMBState *env, bool ifetch)
+{
+ MicroBlazeCPU *cpu = env_archcpu(env);
+
+    /* Are we in nommu mode? */
+ if (!(env->msr & MSR_VM) || !cpu->cfg.use_mmu) {
+ return MMU_NOMMU_IDX;
+ }
+
+ if (env->msr & MSR_UM) {
+ return MMU_USER_IDX;
+ }
+ return MMU_KERNEL_IDX;
+}
+
static const struct {
const char *name;
uint8_t version_id;
@@ -35,6 +35,11 @@
#include "semihosting/semihost.h"
#include "fpu_helper.h"
+int cpu_mmu_index(CPUMIPSState *env, bool ifetch)
+{
+ return hflags_mmu_index(env->hflags);
+}
+
const char regnames[32][3] = {
"r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
"t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
@@ -26,6 +26,12 @@
#include "gdbstub/helpers.h"
#include "hw/qdev-properties.h"
+int cpu_mmu_index(CPUNios2State *env, bool ifetch)
+{
+ return (env->ctrl[CR_STATUS] & CR_STATUS_U) ? MMU_USER_IDX :
+ MMU_SUPERVISOR_IDX;
+}
+
static void nios2_cpu_set_pc(CPUState *cs, vaddr value)
{
Nios2CPU *cpu = NIOS2_CPU(cs);
@@ -25,6 +25,18 @@
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
+int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
+{
+ int ret = MMU_NOMMU_IDX; /* mmu is disabled */
+
+ if (env->sr & (ifetch ? SR_IME : SR_DME)) {
+ /* The mmu is enabled; test supervisor state. */
+ ret = env->sr & SR_SM ? MMU_SUPERVISOR_IDX : MMU_USER_IDX;
+ }
+
+ return ret;
+}
+
static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
{
OpenRISCCPU *cpu = OPENRISC_CPU(cs);
@@ -27,6 +27,15 @@
#include "helper_regs.h"
#include "sysemu/tcg.h"
+int cpu_mmu_index(CPUPPCState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ return (env->hflags >> (ifetch ? HFLAGS_IMMU_IDX : HFLAGS_DMMU_IDX)) & 7;
+#endif
+}
+
target_ulong cpu_read_xer(const CPUPPCState *env)
{
if (is_isa300(env)) {
@@ -33,7 +33,7 @@
#include "debug.h"
#include "tcg/oversized-guest.h"
-int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
+int cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
return 0;
@@ -26,6 +26,11 @@
#include "fpu/softfloat.h"
#include "tcg/debug-assert.h"
+int cpu_mmu_index(CPURXState *env, bool ifetch)
+{
+ return 0;
+}
+
static void rx_cpu_set_pc(CPUState *cs, vaddr value)
{
RXCPU *cpu = RX_CPU(cs);
@@ -43,6 +43,37 @@
#define CR0_RESET 0xE0UL
#define CR14_RESET 0xC2000000UL;
+int cpu_mmu_index(CPUS390XState *env, bool ifetch)
+{
+#ifdef CONFIG_USER_ONLY
+ return MMU_USER_IDX;
+#else
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ return MMU_REAL_IDX;
+ }
+
+ if (ifetch) {
+ if ((env->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME) {
+ return MMU_HOME_IDX;
+ }
+ return MMU_PRIMARY_IDX;
+ }
+
+ switch (env->psw.mask & PSW_MASK_ASC) {
+ case PSW_ASC_PRIMARY:
+ return MMU_PRIMARY_IDX;
+ case PSW_ASC_SECONDARY:
+ return MMU_SECONDARY_IDX;
+ case PSW_ASC_HOME:
+ return MMU_HOME_IDX;
+ case PSW_ASC_ACCREG:
+ /* Fallthrough: access register mode is not yet supported */
+ default:
+ abort();
+ }
+#endif
+}
+
#ifndef CONFIG_USER_ONLY
static bool is_early_exception_psw(uint64_t mask, uint64_t addr)
{
@@ -28,6 +28,19 @@
#include "fpu/softfloat-helpers.h"
#include "tcg/tcg.h"
+int cpu_mmu_index(CPUSH4State *env, bool ifetch)
+{
+ /*
+ * The instruction in a RTE delay slot is fetched in privileged
+ * mode, but executed in user mode.
+ */
+ if (ifetch && (env->flags & TB_FLAG_DELAY_SLOT_RTE)) {
+ return 0;
+ } else {
+ return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+ }
+}
+
static void superh_cpu_set_pc(CPUState *cs, vaddr value)
{
SuperHCPU *cpu = SUPERH_CPU(cs);
@@ -29,6 +29,34 @@
//#define DEBUG_FEATURES
+int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
+{
+#if defined(CONFIG_USER_ONLY)
+ return MMU_USER_IDX;
+#elif !defined(TARGET_SPARC64)
+ if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ return MMU_PHYS_IDX;
+ } else {
+ return env->psrs;
+ }
+#else
+ /* IMMU or DMMU disabled. */
+ if (ifetch
+ ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
+ : (env->lsu & DMMU_E) == 0) {
+ return MMU_PHYS_IDX;
+ } else if (cpu_hypervisor_mode(env)) {
+ return MMU_PHYS_IDX;
+ } else if (env->tl > 0) {
+ return MMU_NUCLEUS_IDX;
+ } else if (cpu_supervisor_mode(env)) {
+ return MMU_KERNEL_IDX;
+ } else {
+ return MMU_USER_IDX;
+ }
+#endif
+}
+
static void sparc_cpu_reset_hold(Object *obj)
{
CPUState *s = CPU(obj);
@@ -24,6 +24,11 @@
#include "qemu/error-report.h"
#include "tcg/debug-assert.h"
+int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
+{
+ return 0;
+}
+
static inline void set_feature(CPUTriCoreState *env, int feature)
{
env->features |= 1ULL << feature;
@@ -39,6 +39,10 @@
#include "exec/memory.h"
#endif
+int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
+{
+ return xtensa_get_cring(env);
+}
static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
{