@@ -2752,7 +2752,6 @@ static inline void cpu_get_tb_cpu_state(CPUPPCState *env, vaddr *pc,
}
#endif
-G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception);
G_NORETURN void raise_exception_err(CPUPPCState *env, uint32_t exception,
uint32_t error_code);
G_NORETURN void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
@@ -268,8 +268,6 @@ static inline void pte_invalidate(target_ulong *pte0)
#define PTE_PTEM_MASK 0x7FFFFFBF
#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
-uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr);
-
#ifdef CONFIG_USER_ONLY
void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr,
MMUAccessType access_type,
@@ -2426,417 +2426,3 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
}
#endif /* !CONFIG_USER_ONLY */
-
-#ifdef CONFIG_TCG
-
-#ifndef CONFIG_USER_ONLY
-void helper_store_msr(CPUPPCState *env, target_ulong val)
-{
- uint32_t excp = hreg_store_msr(env, val, 0);
-
- if (excp != 0) {
- cpu_interrupt_exittb(env_cpu(env));
- raise_exception(env, excp);
- }
-}
-
-void helper_ppc_maybe_interrupt(CPUPPCState *env)
-{
- ppc_maybe_interrupt(env);
-}
-
-#ifdef TARGET_PPC64
-void helper_scv(CPUPPCState *env, uint32_t lev)
-{
- if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
- raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
- } else {
- raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
- }
-}
-
-void helper_pminsn(CPUPPCState *env, uint32_t insn)
-{
- CPUState *cs = env_cpu(env);
-
- cs->halted = 1;
-
- /* Condition for waking up at 0x100 */
- env->resume_as_sreset = (insn != PPC_PM_STOP) ||
- (env->spr[SPR_PSSCR] & PSSCR_EC);
-
- /* HDECR is not to wake from PM state, it may have already fired */
- if (env->resume_as_sreset) {
- PowerPCCPU *cpu = env_archcpu(env);
- ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
- }
-
- ppc_maybe_interrupt(env);
-}
-#endif /* TARGET_PPC64 */
-
-static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
-{
- /* MSR:POW cannot be set by any form of rfi */
- msr &= ~(1ULL << MSR_POW);
-
- /* MSR:TGPR cannot be set by any form of rfi */
- if (env->flags & POWERPC_FLAG_TGPR) {
- msr &= ~(1ULL << MSR_TGPR);
- }
-
-#ifdef TARGET_PPC64
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- nip = (uint32_t)nip;
- }
-#else
- nip = (uint32_t)nip;
-#endif
- /* XXX: beware: this is false if VLE is supported */
- env->nip = nip & ~((target_ulong)0x00000003);
- hreg_store_msr(env, msr, 1);
- trace_ppc_excp_rfi(env->nip, env->msr);
- /*
- * No need to raise an exception here, as rfi is always the last
- * insn of a TB
- */
- cpu_interrupt_exittb(env_cpu(env));
- /* Reset the reservation */
- env->reserve_addr = -1;
-
- /* Context synchronizing: check if TCG TLB needs flush */
- check_tlb_flush(env, false);
-}
-
-void helper_rfi(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
-}
-
-#ifdef TARGET_PPC64
-void helper_rfid(CPUPPCState *env)
-{
- /*
- * The architecture defines a number of rules for which bits can
- * change but in practice, we handle this in hreg_store_msr()
- * which will be called by do_rfi(), so there is no need to filter
- * here
- */
- do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
-}
-
-void helper_rfscv(CPUPPCState *env)
-{
- do_rfi(env, env->lr, env->ctr);
-}
-
-void helper_hrfid(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
-}
-
-void helper_rfebb(CPUPPCState *env, target_ulong s)
-{
- target_ulong msr = env->msr;
-
- /*
- * Handling of BESCR bits 32:33 according to PowerISA v3.1:
- *
- * "If BESCR 32:33 != 0b00 the instruction is treated as if
- * the instruction form were invalid."
- */
- if (env->spr[SPR_BESCR] & BESCR_INVALID) {
- raise_exception_err(env, POWERPC_EXCP_PROGRAM,
- POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
- }
-
- env->nip = env->spr[SPR_EBBRR];
-
- /* Switching to 32-bit ? Crop the nip */
- if (!msr_is_64bit(env, msr)) {
- env->nip = (uint32_t)env->spr[SPR_EBBRR];
- }
-
- if (s) {
- env->spr[SPR_BESCR] |= BESCR_GE;
- } else {
- env->spr[SPR_BESCR] &= ~BESCR_GE;
- }
-}
-
-/*
- * Triggers or queues an 'ebb_excp' EBB exception. All checks
- * but FSCR, HFSCR and msr_pr must be done beforehand.
- *
- * PowerISA v3.1 isn't clear about whether an EBB should be
- * postponed or cancelled if the EBB facility is unavailable.
- * Our assumption here is that the EBB is cancelled if both
- * FSCR and HFSCR EBB facilities aren't available.
- */
-static void do_ebb(CPUPPCState *env, int ebb_excp)
-{
- PowerPCCPU *cpu = env_archcpu(env);
-
- /*
- * FSCR_EBB and FSCR_IC_EBB are the same bits used with
- * HFSCR.
- */
- helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
- helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
-
- if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
- env->spr[SPR_BESCR] |= BESCR_PMEO;
- } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
- env->spr[SPR_BESCR] |= BESCR_EEO;
- }
-
- if (FIELD_EX64(env->msr, MSR, PR)) {
- powerpc_excp(cpu, ebb_excp);
- } else {
- ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
- }
-}
-
-void raise_ebb_perfm_exception(CPUPPCState *env)
-{
- bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
- env->spr[SPR_BESCR] & BESCR_PME &&
- env->spr[SPR_BESCR] & BESCR_GE;
-
- if (!perfm_ebb_enabled) {
- return;
- }
-
- do_ebb(env, POWERPC_EXCP_PERFM_EBB);
-}
-#endif /* TARGET_PPC64 */
-
-/*****************************************************************************/
-/* Embedded PowerPC specific helpers */
-void helper_40x_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
-}
-
-void helper_rfci(CPUPPCState *env)
-{
- do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
-}
-
-void helper_rfdi(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
-}
-
-void helper_rfmci(CPUPPCState *env)
-{
- /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
- do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
-}
-
-/* Embedded.Processor Control */
-static int dbell2irq(target_ulong rb)
-{
- int msg = rb & DBELL_TYPE_MASK;
- int irq = -1;
-
- switch (msg) {
- case DBELL_TYPE_DBELL:
- irq = PPC_INTERRUPT_DOORBELL;
- break;
- case DBELL_TYPE_DBELL_CRIT:
- irq = PPC_INTERRUPT_CDOORBELL;
- break;
- case DBELL_TYPE_G_DBELL:
- case DBELL_TYPE_G_DBELL_CRIT:
- case DBELL_TYPE_G_DBELL_MC:
- /* XXX implement */
- default:
- break;
- }
-
- return irq;
-}
-
-void helper_msgclr(CPUPPCState *env, target_ulong rb)
-{
- int irq = dbell2irq(rb);
-
- if (irq < 0) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), irq, 0);
-}
-
-void helper_msgsnd(target_ulong rb)
-{
- int irq = dbell2irq(rb);
- int pir = rb & DBELL_PIRTAG_MASK;
- CPUState *cs;
-
- if (irq < 0) {
- return;
- }
-
- bql_lock();
- CPU_FOREACH(cs) {
- PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *cenv = &cpu->env;
-
- if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
- ppc_set_irq(cpu, irq, 1);
- }
- }
- bql_unlock();
-}
-
-/* Server Processor Control */
-
-static bool dbell_type_server(target_ulong rb)
-{
- /*
- * A Directed Hypervisor Doorbell message is sent only if the
- * message type is 5. All other types are reserved and the
- * instruction is a no-op
- */
- return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
-}
-
-static inline bool dbell_bcast_core(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
-}
-
-static inline bool dbell_bcast_subproc(target_ulong rb)
-{
- return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
-}
-
-/*
- * Send an interrupt to a thread in the same core as env).
- */
-static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
-{
- PowerPCCPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
-
- if (ppc_cpu_lpar_single_threaded(cs)) {
- if (target_tir == 0) {
- ppc_set_irq(cpu, irq, 1);
- }
- } else {
- CPUState *ccs;
-
- /* Does iothread need to be locked for walking CPU list? */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- PowerPCCPU *ccpu = POWERPC_CPU(ccs);
- if (target_tir == ppc_cpu_tir(ccpu)) {
- ppc_set_irq(ccpu, irq, 1);
- break;
- }
- }
- bql_unlock();
- }
-}
-
-void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
-{
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
-}
-
-void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
-{
- int pir = rb & DBELL_PROCIDTAG_MASK;
- bool brdcast = false;
- CPUState *cs, *ccs;
- PowerPCCPU *cpu;
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
- if (!(env->insns_flags2 & PPC2_ISA300)) {
- msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
- return;
- }
-
- /* POWER9 and later msgsnd is a global (targets any thread) */
- cpu = ppc_get_vcpu_by_pir(pir);
- if (!cpu) {
- return;
- }
- cs = CPU(cpu);
-
- if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
- (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
- brdcast = true;
- }
-
- if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
- ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
- return;
- }
-
- /*
- * Why is bql needed for walking CPU list? Answer seems to be because ppc
- * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
- * so could this be removed?
- */
- bql_lock();
- THREAD_SIBLING_FOREACH(cs, ccs) {
- ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
- }
- bql_unlock();
-}
-
-#ifdef TARGET_PPC64
-void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
-{
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
-}
-
-/*
- * sends a message to another thread on the same
- * multi-threaded processor
- */
-void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
-{
- helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
-
- if (!dbell_type_server(rb)) {
- return;
- }
-
- msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
-}
-#endif /* TARGET_PPC64 */
-
-/* Single-step tracing */
-void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
-{
- uint32_t error_code = 0;
- if (env->insns_flags2 & PPC2_ISA207S) {
- /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
- env->spr[SPR_POWER_SIAR] = prev_ip;
- error_code = PPC_BIT(33);
- }
- raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
-}
-
-#endif /* !CONFIG_USER_ONLY */
-#endif /* CONFIG_TCG */
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "qemu/log.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
@@ -61,7 +62,7 @@ void raise_exception_err(CPUPPCState *env, uint32_t exception,
raise_exception_err_ra(env, exception, error_code, 0);
}
-void raise_exception(CPUPPCState *env, uint32_t exception)
+static G_NORETURN void raise_exception(CPUPPCState *env, uint32_t exception)
{
raise_exception_err_ra(env, exception, 0, 0);
}
@@ -209,6 +210,8 @@ HELPER_HASH(HASHCHKP, env->spr[SPR_HASHPKEYR], false, PHIE)
#ifndef CONFIG_USER_ONLY
+static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr);
+
void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
int mmu_idx, uintptr_t retaddr)
@@ -415,7 +418,7 @@ static inline bool insn_need_byteswap(CPUArchState *env)
return !!(env->msr & ((target_ulong)1 << MSR_LE));
}
-uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
+static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr)
{
uint32_t insn = cpu_ldl_code(env, addr);
@@ -497,4 +500,413 @@ void ppc_tcg_hv_emu(CPUPPCState *env, target_ulong *new_msr,
*new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
}
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+ uint32_t excp = hreg_store_msr(env, val, 0);
+
+ if (excp != 0) {
+ cpu_interrupt_exittb(env_cpu(env));
+ raise_exception(env, excp);
+ }
+}
+
+void helper_ppc_maybe_interrupt(CPUPPCState *env)
+{
+ ppc_maybe_interrupt(env);
+}
+
+#ifdef TARGET_PPC64
+void helper_scv(CPUPPCState *env, uint32_t lev)
+{
+ if (env->spr[SPR_FSCR] & (1ull << FSCR_SCV)) {
+ raise_exception_err(env, POWERPC_EXCP_SYSCALL_VECTORED, lev);
+ } else {
+ raise_exception_err(env, POWERPC_EXCP_FU, FSCR_IC_SCV);
+ }
+}
+
+void helper_pminsn(CPUPPCState *env, uint32_t insn)
+{
+ CPUState *cs = env_cpu(env);
+
+ cs->halted = 1;
+
+ /* Condition for waking up at 0x100 */
+ env->resume_as_sreset = (insn != PPC_PM_STOP) ||
+ (env->spr[SPR_PSSCR] & PSSCR_EC);
+
+ /* HDECR is not to wake from PM state, it may have already fired */
+ if (env->resume_as_sreset) {
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+ }
+
+ ppc_maybe_interrupt(env);
+}
+
+#endif /* TARGET_PPC64 */
+
+static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+ /* MSR:POW cannot be set by any form of rfi */
+ msr &= ~(1ULL << MSR_POW);
+
+ /* MSR:TGPR cannot be set by any form of rfi */
+ if (env->flags & POWERPC_FLAG_TGPR) {
+ msr &= ~(1ULL << MSR_TGPR);
+ }
+
+#ifdef TARGET_PPC64
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ nip = (uint32_t)nip;
+ }
+#else
+ nip = (uint32_t)nip;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+ trace_ppc_excp_rfi(env->nip, env->msr);
+ /*
+ * No need to raise an exception here, as rfi is always the last
+ * insn of a TB
+ */
+ cpu_interrupt_exittb(env_cpu(env));
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /* Context synchronizing: check if TCG TLB needs flush */
+ check_tlb_flush(env, false);
+}
+
+void helper_rfi(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#ifdef TARGET_PPC64
+void helper_rfid(CPUPPCState *env)
+{
+ /*
+ * The architecture defines a number of rules for which bits can
+ * change but in practice, we handle this in hreg_store_msr()
+ * which will be called by do_rfi(), so there is no need to filter
+ * here
+ */
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_rfscv(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+
+void helper_rfebb(CPUPPCState *env, target_ulong s)
+{
+ target_ulong msr = env->msr;
+
+ /*
+ * Handling of BESCR bits 32:33 according to PowerISA v3.1:
+ *
+ * "If BESCR 32:33 != 0b00 the instruction is treated as if
+ * the instruction form were invalid."
+ */
+ if (env->spr[SPR_BESCR] & BESCR_INVALID) {
+ raise_exception_err(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
+ }
+
+ env->nip = env->spr[SPR_EBBRR];
+
+ /* Switching to 32-bit ? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ env->nip = (uint32_t)env->spr[SPR_EBBRR];
+ }
+
+ if (s) {
+ env->spr[SPR_BESCR] |= BESCR_GE;
+ } else {
+ env->spr[SPR_BESCR] &= ~BESCR_GE;
+ }
+}
+
+/*
+ * Triggers or queues an 'ebb_excp' EBB exception. All checks
+ * but FSCR, HFSCR and msr_pr must be done beforehand.
+ *
+ * PowerISA v3.1 isn't clear about whether an EBB should be
+ * postponed or cancelled if the EBB facility is unavailable.
+ * Our assumption here is that the EBB is cancelled if both
+ * FSCR and HFSCR EBB facilities aren't available.
+ */
+static void do_ebb(CPUPPCState *env, int ebb_excp)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ /*
+ * FSCR_EBB and FSCR_IC_EBB are the same bits used with
+ * HFSCR.
+ */
+ helper_fscr_facility_check(env, FSCR_EBB, 0, FSCR_IC_EBB);
+ helper_hfscr_facility_check(env, FSCR_EBB, "EBB", FSCR_IC_EBB);
+
+ if (ebb_excp == POWERPC_EXCP_PERFM_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_PMEO;
+ } else if (ebb_excp == POWERPC_EXCP_EXTERNAL_EBB) {
+ env->spr[SPR_BESCR] |= BESCR_EEO;
+ }
+
+ if (FIELD_EX64(env->msr, MSR, PR)) {
+ powerpc_excp(cpu, ebb_excp);
+ } else {
+ ppc_set_irq(cpu, PPC_INTERRUPT_EBB, 1);
+ }
+}
+
+void raise_ebb_perfm_exception(CPUPPCState *env)
+{
+ bool perfm_ebb_enabled = env->spr[SPR_POWER_MMCR0] & MMCR0_EBE &&
+ env->spr[SPR_BESCR] & BESCR_PME &&
+ env->spr[SPR_BESCR] & BESCR_GE;
+
+ if (!perfm_ebb_enabled) {
+ return;
+ }
+
+ do_ebb(env, POWERPC_EXCP_PERFM_EBB);
+}
+#endif /* TARGET_PPC64 */
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+
+/* Embedded.Processor Control */
+static int dbell2irq(target_ulong rb)
+{
+ int msg = rb & DBELL_TYPE_MASK;
+ int irq = -1;
+
+ switch (msg) {
+ case DBELL_TYPE_DBELL:
+ irq = PPC_INTERRUPT_DOORBELL;
+ break;
+ case DBELL_TYPE_DBELL_CRIT:
+ irq = PPC_INTERRUPT_CDOORBELL;
+ break;
+ case DBELL_TYPE_G_DBELL:
+ case DBELL_TYPE_G_DBELL_CRIT:
+ case DBELL_TYPE_G_DBELL_MC:
+ /* XXX implement */
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+void helper_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+
+ if (irq < 0) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), irq, 0);
+}
+
+void helper_msgsnd(target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+ int pir = rb & DBELL_PIRTAG_MASK;
+ CPUState *cs;
+
+ if (irq < 0) {
+ return;
+ }
+
+ bql_lock();
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ }
+ bql_unlock();
+}
+
+/* Server Processor Control */
+
+static bool dbell_type_server(target_ulong rb)
+{
+ /*
+ * A Directed Hypervisor Doorbell message is sent only if the
+ * message type is 5. All other types are reserved and the
+ * instruction is a no-op
+ */
+ return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER;
+}
+
+static inline bool dbell_bcast_core(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE;
+}
+
+static inline bool dbell_bcast_subproc(target_ulong rb)
+{
+ return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC;
+}
+
+/*
+ * Send an interrupt to a thread in the same core as env.
+ */
+static void msgsnd_core_tir(CPUPPCState *env, uint32_t target_tir, int irq)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ CPUState *cs = env_cpu(env);
+
+ if (ppc_cpu_lpar_single_threaded(cs)) {
+ if (target_tir == 0) {
+ ppc_set_irq(cpu, irq, 1);
+ }
+ } else {
+ CPUState *ccs;
+
+ /* Does iothread need to be locked for walking CPU list? */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ PowerPCCPU *ccpu = POWERPC_CPU(ccs);
+ if (target_tir == ppc_cpu_tir(ccpu)) {
+ ppc_set_irq(ccpu, irq, 1);
+ break;
+ }
+ }
+ bql_unlock();
+ }
+}
+
+void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0);
+}
+
+void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb)
+{
+ int pir = rb & DBELL_PROCIDTAG_MASK;
+ bool brdcast = false;
+ CPUState *cs, *ccs;
+ PowerPCCPU *cpu;
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ /* POWER8 msgsnd is like msgsndp (targets a thread within core) */
+ if (!(env->insns_flags2 & PPC2_ISA300)) {
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_HDOORBELL);
+ return;
+ }
+
+ /* POWER9 and later msgsnd is a global (targets any thread) */
+ cpu = ppc_get_vcpu_by_pir(pir);
+ if (!cpu) {
+ return;
+ }
+ cs = CPU(cpu);
+
+ if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) &&
+ (env->flags & POWERPC_FLAG_SMT_1LPAR))) {
+ brdcast = true;
+ }
+
+ if (ppc_cpu_core_single_threaded(cs) || !brdcast) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1);
+ return;
+ }
+
+ /*
+ * Why is bql needed for walking CPU list? Answer seems to be because ppc
+ * irq handling needs it, but ppc_set_irq takes the lock itself if needed,
+ * so could this be removed?
+ */
+ bql_lock();
+ THREAD_SIBLING_FOREACH(cs, ccs) {
+ ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1);
+ }
+ bql_unlock();
+}
+
+#ifdef TARGET_PPC64
+void helper_book3s_msgclrp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgclrp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_DOORBELL, 0);
+}
+
+/*
+ * sends a message to another thread on the same
+ * multi-threaded processor
+ */
+void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb)
+{
+ helper_hfscr_facility_check(env, HFSCR_MSGP, "msgsndp", HFSCR_IC_MSGP);
+
+ if (!dbell_type_server(rb)) {
+ return;
+ }
+
+ msgsnd_core_tir(env, rb & PPC_BITMASK(57, 63), PPC_INTERRUPT_DOORBELL);
+}
+#endif /* TARGET_PPC64 */
+
+/* Single-step tracing */
+void helper_book3s_trace(CPUPPCState *env, target_ulong prev_ip)
+{
+ uint32_t error_code = 0;
+ if (env->insns_flags2 & PPC2_ISA207S) {
+ /* Load/store reporting, SRR1[35, 36] and SDAR, are not implemented. */
+ env->spr[SPR_POWER_SIAR] = prev_ip;
+ error_code = PPC_BIT(33);
+ }
+ raise_exception_err(env, POWERPC_EXCP_TRACE, error_code);
+}
+
#endif /* !CONFIG_USER_ONLY */
We don't need to expose ppc_ldl_code() anymore. Similarly, make
raise_exception() scope local.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 target/ppc/cpu.h             |   1 -
 target/ppc/internal.h        |   2 -
 target/ppc/excp_helper.c     | 414 ----------------------------------
 target/ppc/tcg-excp_helper.c | 416 ++++++++++++++++++++++++++++++++++
 4 files changed, 414 insertions(+), 419 deletions(-)