@@ -342,19 +342,32 @@ extern const VMStateDescription vmstate_s390_cpu;
/* tb flags */
-#define FLAG_MASK_PSW_SHIFT 31
-#define FLAG_MASK_PER (PSW_MASK_PER >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_DAT (PSW_MASK_DAT >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_ASC (PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_64 (PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_32 (PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_PSW (FLAG_MASK_PER | FLAG_MASK_DAT | FLAG_MASK_PSTATE \
- | FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32)
+#define FLAG_MASK_PSW_SHIFT 31
+#define FLAG_MASK_32 0x00000001u
+#define FLAG_MASK_64 0x00000002u
+#define FLAG_MASK_AFP 0x00000004u
+#define FLAG_MASK_VECTOR 0x00000008u
+#define FLAG_MASK_ASC 0x00018000u
+#define FLAG_MASK_PSTATE 0x00020000u
+#define FLAG_MASK_PER_IFETCH_NULLIFY 0x01000000u
+#define FLAG_MASK_DAT 0x08000000u
+#define FLAG_MASK_PER_STORE_REAL 0x20000000u
+#define FLAG_MASK_PER_IFETCH 0x40000000u
+#define FLAG_MASK_PER_BRANCH 0x80000000u
-/* we'll use some unused PSW positions to store CR flags in tb flags */
-#define FLAG_MASK_AFP (PSW_MASK_UNUSED_2 >> FLAG_MASK_PSW_SHIFT)
-#define FLAG_MASK_VECTOR (PSW_MASK_UNUSED_3 >> FLAG_MASK_PSW_SHIFT)
+QEMU_BUILD_BUG_ON(FLAG_MASK_32 != PSW_MASK_32 >> FLAG_MASK_PSW_SHIFT);
+QEMU_BUILD_BUG_ON(FLAG_MASK_64 != PSW_MASK_64 >> FLAG_MASK_PSW_SHIFT);
+QEMU_BUILD_BUG_ON(FLAG_MASK_ASC != PSW_MASK_ASC >> FLAG_MASK_PSW_SHIFT);
+QEMU_BUILD_BUG_ON(FLAG_MASK_PSTATE != PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT);
+QEMU_BUILD_BUG_ON(FLAG_MASK_DAT != PSW_MASK_DAT >> FLAG_MASK_PSW_SHIFT);
+
+#define FLAG_MASK_PSW (FLAG_MASK_DAT | FLAG_MASK_PSTATE | \
+ FLAG_MASK_ASC | FLAG_MASK_64 | FLAG_MASK_32)
+#define FLAG_MASK_CR9 (FLAG_MASK_PER_BRANCH | FLAG_MASK_PER_IFETCH)
+#define FLAG_MASK_PER (FLAG_MASK_PER_BRANCH | \
+ FLAG_MASK_PER_IFETCH | \
+ FLAG_MASK_PER_IFETCH_NULLIFY | \
+ FLAG_MASK_PER_STORE_REAL)
/* Control register 0 bits */
#define CR0_LOWPROT 0x0000000010000000ULL
@@ -431,6 +444,11 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
#define PER_CR9_CONTROL_TRANSACTION_SUPRESS 0x00400000
#define PER_CR9_CONTROL_STORAGE_ALTERATION 0x00200000
+QEMU_BUILD_BUG_ON(FLAG_MASK_PER_BRANCH != PER_CR9_EVENT_BRANCH);
+QEMU_BUILD_BUG_ON(FLAG_MASK_PER_IFETCH != PER_CR9_EVENT_IFETCH);
+QEMU_BUILD_BUG_ON(FLAG_MASK_PER_IFETCH_NULLIFY !=
+ PER_CR9_EVENT_IFETCH_NULLIFICATION);
+
/* PER bits from the PER CODE/ATMID/AI in lowcore */
#define PER_CODE_EVENT_BRANCH 0x8000
#define PER_CODE_EVENT_IFETCH 0x4000
@@ -325,8 +325,10 @@ static void s390_cpu_reset_full(DeviceState *dev)
#include "hw/core/tcg-cpu-ops.h"
void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags)
+ uint64_t *cs_base, uint32_t *pflags)
{
+ uint32_t flags;
+
if (env->psw.addr & 1) {
/*
* Instructions must be at even addresses.
@@ -335,15 +337,27 @@ void cpu_get_tb_cpu_state(CPUS390XState *env, vaddr *pc,
env->int_pgm_ilen = 2; /* see s390_cpu_tlb_fill() */
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, 0);
}
+
*pc = env->psw.addr;
*cs_base = env->ex_value;
- *flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
+
+ flags = (env->psw.mask >> FLAG_MASK_PSW_SHIFT) & FLAG_MASK_PSW;
+ if (env->psw.mask & PSW_MASK_PER) {
+ flags |= env->cregs[9] & (FLAG_MASK_PER_BRANCH |
+ FLAG_MASK_PER_IFETCH |
+ FLAG_MASK_PER_IFETCH_NULLIFY);
+ if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+ flags |= FLAG_MASK_PER_STORE_REAL;
+ }
+ }
if (env->cregs[0] & CR0_AFP) {
- *flags |= FLAG_MASK_AFP;
+ flags |= FLAG_MASK_AFP;
}
if (env->cregs[0] & CR0_VECTOR) {
- *flags |= FLAG_MASK_VECTOR;
+ flags |= FLAG_MASK_VECTOR;
}
+ *pflags = flags;
}
static const TCGCPUOps s390_tcg_ops = {
@@ -627,18 +627,16 @@ static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
{
- if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
- if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
- || get_per_in_range(env, to)) {
- env->per_address = from;
- env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
- }
+ if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
+ || get_per_in_range(env, to)) {
+ env->per_address = from;
+ env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
}
}
void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
{
- if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
+ if (get_per_in_range(env, addr)) {
env->per_address = addr;
env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
@@ -659,12 +657,9 @@ void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
void HELPER(per_store_real)(CPUS390XState *env)
{
- if ((env->cregs[9] & PER_CR9_EVENT_STORE) &&
- (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
- /* PSW is saved just before calling the helper. */
- env->per_address = env->psw.addr;
- env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
- }
+ /* PSW is saved just before calling the helper. */
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
}
#endif
@@ -348,7 +348,7 @@ static void per_branch(DisasContext *s, bool to_next)
#ifndef CONFIG_USER_ONLY
tcg_gen_movi_i64(gbea, s->base.pc_next);
- if (s->base.tb->flags & FLAG_MASK_PER) {
+ if (s->base.tb->flags & FLAG_MASK_PER_BRANCH) {
TCGv_i64 next_pc = to_next ? tcg_constant_i64(s->pc_tmp) : psw_addr;
gen_helper_per_branch(tcg_env, gbea, next_pc);
}
@@ -359,7 +359,7 @@ static void per_branch_cond(DisasContext *s, TCGCond cond,
TCGv_i64 arg1, TCGv_i64 arg2)
{
#ifndef CONFIG_USER_ONLY
- if (s->base.tb->flags & FLAG_MASK_PER) {
+ if (s->base.tb->flags & FLAG_MASK_PER_BRANCH) {
TCGLabel *lab = gen_new_label();
tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
@@ -658,7 +658,7 @@ static void gen_op_calc_cc(DisasContext *s)
static bool use_goto_tb(DisasContext *s, uint64_t dest)
{
- if (unlikely(s->base.tb->flags & FLAG_MASK_PER)) {
+ if (unlikely(s->base.tb->flags & FLAG_MASK_PER_BRANCH)) {
return false;
}
return translator_use_goto_tb(&s->base, dest);
@@ -4411,7 +4411,7 @@ static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
tcg_gen_qemu_st_tl(o->in1, o->in2, MMU_REAL_IDX, s->insn->data);
- if (s->base.tb->flags & FLAG_MASK_PER) {
+ if (s->base.tb->flags & FLAG_MASK_PER_STORE_REAL) {
update_psw_addr(s);
gen_helper_per_store_real(tcg_env);
}
@@ -6325,7 +6325,7 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
}
#ifndef CONFIG_USER_ONLY
- if (s->base.tb->flags & FLAG_MASK_PER) {
+ if (s->base.tb->flags & FLAG_MASK_PER_IFETCH) {
TCGv_i64 addr = tcg_constant_i64(s->base.pc_next);
gen_helper_per_ifetch(tcg_env, addr);
}
Record the successful-branching, instruction-fetching, and store-using-real-address PER events as separate bits in the TB flags. The other PER bits are not used during translation. Having checked these bits at translation time, we can remove the runtime tests from the helpers.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/s390x/cpu.h             | 42 ++++++++++++++++++++++++----------
 target/s390x/cpu.c             | 22 ++++++++++++++----
 target/s390x/tcg/misc_helper.c | 21 +++++++----------
 target/s390x/tcg/translate.c   | 10 ++++----
 4 files changed, 61 insertions(+), 34 deletions(-)
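
For reference, the bit-layout invariant that the new QEMU_BUILD_BUG_ON checks in cpu.h enforce can be exercised outside of QEMU. Below is a minimal standalone C sketch, not part of the patch; the PSW_MASK_* and PER_CR9_* values are restated locally and assumed to match target/s390x/cpu.h:

/*
 * Standalone sketch (not part of the patch): shows that the low TB-flag
 * bits are simply the PSW mask shifted right by FLAG_MASK_PSW_SHIFT, while
 * the PER flag bits reuse the CR9 event bit positions directly, so cregs[9]
 * can be masked straight into tb->flags without shifting.
 * PSW_MASK_* / PER_CR9_* values are assumed from target/s390x/cpu.h.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PSW_MASK_DAT                        0x0400000000000000ULL
#define PSW_MASK_PSTATE                     0x0001000000000000ULL
#define PSW_MASK_ASC                        0x0000C00000000000ULL
#define PSW_MASK_64                         0x0000000100000000ULL
#define PSW_MASK_32                         0x0000000080000000ULL

#define PER_CR9_EVENT_BRANCH                0x80000000u
#define PER_CR9_EVENT_IFETCH                0x40000000u
#define PER_CR9_EVENT_IFETCH_NULLIFICATION  0x01000000u

#define FLAG_MASK_PSW_SHIFT 31

int main(void)
{
    /* PSW-derived TB flags: the same bit, shifted down into 32 bits. */
    assert((PSW_MASK_32     >> FLAG_MASK_PSW_SHIFT) == 0x00000001u);
    assert((PSW_MASK_64     >> FLAG_MASK_PSW_SHIFT) == 0x00000002u);
    assert((PSW_MASK_ASC    >> FLAG_MASK_PSW_SHIFT) == 0x00018000u);
    assert((PSW_MASK_PSTATE >> FLAG_MASK_PSW_SHIFT) == 0x00020000u);
    assert((PSW_MASK_DAT    >> FLAG_MASK_PSW_SHIFT) == 0x08000000u);

    /* PER TB flags: chosen to coincide with the CR9 event bits. */
    assert(PER_CR9_EVENT_BRANCH               == 0x80000000u);
    assert(PER_CR9_EVENT_IFETCH               == 0x40000000u);
    assert(PER_CR9_EVENT_IFETCH_NULLIFICATION == 0x01000000u);

    puts("TB-flag layout invariants hold");
    return 0;
}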