@@ -520,7 +520,7 @@ static inline target_ulong get_memio_eip(CPUX86State *env)
}
/* Per x86_restore_state_to_opc. */
- if (TARGET_TB_PCREL) {
+ if (cs->tcg_cflags & CF_PCREL) {
return (env->eip & TARGET_PAGE_MASK) | data[0];
} else {
return data[0] - env->segs[R_CS].base;
@@ -49,8 +49,8 @@ static void x86_cpu_exec_exit(CPUState *cs)
static void x86_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
- /* The instruction pointer is always up to date with TARGET_TB_PCREL. */
- if (!TARGET_TB_PCREL) {
+ /* The instruction pointer is always up to date with CF_PCREL. */
+ if (!(tb_cflags(tb) & CF_PCREL)) {
CPUX86State *env = cs->env_ptr;
env->eip = tb_pc(tb) - tb->cs_base;
}
@@ -64,7 +64,7 @@ static void x86_restore_state_to_opc(CPUState *cs,
CPUX86State *env = &cpu->env;
int cc_op = data[1];
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(tb) & CF_PCREL) {
env->eip = (env->eip & TARGET_PAGE_MASK) | data[0];
} else {
env->eip = data[0] - tb->cs_base;
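
The three hunks above all hinge on what i386_tr_insn_start (see the final hunk below) stores in insn_start data[0]. Here is a minimal standalone sketch of that round trip — plain C, not QEMU code; save_insn_pc, restore_eip and the 4 KiB TARGET_PAGE_MASK value are illustrative assumptions. With CF_PCREL only the page offset of EIP is recorded, and the restore ORs it back into whatever page the current EIP is on; without CF_PCREL the full linear PC is recorded and the restore subtracts the CS base.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK ((uint64_t)-4096)   /* assume 4 KiB pages */

    /* Roughly what i386_tr_insn_start records for one instruction. */
    static uint64_t save_insn_pc(uint64_t pc, uint64_t cs_base, bool pcrel)
    {
        return pcrel ? (pc - cs_base) & ~TARGET_PAGE_MASK  /* EIP page offset */
                     : pc;                                  /* full linear PC  */
    }

    /* Roughly what x86_restore_state_to_opc / get_memio_eip reconstruct. */
    static uint64_t restore_eip(uint64_t data0, uint64_t cur_eip,
                                uint64_t cs_base, bool pcrel)
    {
        return pcrel ? (cur_eip & TARGET_PAGE_MASK) | data0
                     : data0 - cs_base;
    }

    int main(void)
    {
        uint64_t cs_base = 0x1000, pc = 0x55501234, eip = pc - cs_base;

        for (int pcrel = 0; pcrel <= 1; pcrel++) {
            uint64_t data0 = save_insn_pc(pc, cs_base, pcrel);
            /* For CF_PCREL, cur_eip only has to be on the same page as eip. */
            assert(restore_eip(data0, eip + 7, cs_base, pcrel) == eip);
        }
        return 0;
    }
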
@@ -545,7 +545,7 @@ static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
static void gen_update_eip_cur(DisasContext *s)
{
assert(s->pc_save != -1);
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
} else {
tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
@@ -556,7 +556,7 @@ static void gen_update_eip_cur(DisasContext *s)
static void gen_update_eip_next(DisasContext *s)
{
assert(s->pc_save != -1);
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
} else {
tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
@@ -588,7 +588,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s)
if (CODE64(s)) {
return tcg_constant_i32(-1);
}
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv_i32 ret = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(ret, cpu_eip);
tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
@@ -601,7 +601,7 @@ static TCGv_i32 eip_next_i32(DisasContext *s)
static TCGv eip_next_tl(DisasContext *s)
{
assert(s->pc_save != -1);
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
return ret;
@@ -613,7 +613,7 @@ static TCGv eip_next_tl(DisasContext *s)
static TCGv eip_cur_tl(DisasContext *s)
{
assert(s->pc_save != -1);
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
TCGv ret = tcg_temp_new();
tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
return ret;
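
Each of these helpers emits an add of a translation-time delta rather than a move of an absolute EIP. That is the invariant CF_PCREL relies on: cpu_eip is only known to hold the execution-time EIP corresponding to s->pc_save, and that value can land on a different page every time the TB is reused. A rough standalone model of the invariant, with hypothetical names rather than QEMU APIs:

    #include <assert.h>
    #include <stdint.h>

    /*
     * At translation time only the delta "pc - pc_save" is known; the
     * absolute EIP exists only at execution time, in the cpu_eip register.
     */
    struct tb_deltas {
        int64_t to_cur;    /* s->base.pc_next - s->pc_save (gen_update_eip_cur)  */
        int64_t to_next;   /* s->pc - s->pc_save           (gen_update_eip_next) */
    };

    /* cpu_eip holds the execution-time EIP matching pc_save on entry. */
    static uint64_t eip_cur(const struct tb_deltas *d, uint64_t cpu_eip)
    {
        return cpu_eip + d->to_cur;     /* tcg_gen_addi_tl(..., delta) */
    }

    static uint64_t eip_next(const struct tb_deltas *d, uint64_t cpu_eip)
    {
        return cpu_eip + d->to_next;
    }

    int main(void)
    {
        struct tb_deltas d = { .to_cur = 2, .to_next = 7 };

        /* The same translated code reused at two different virtual addresses. */
        assert(eip_cur(&d, 0x00401000) == 0x00401002);
        assert(eip_next(&d, 0x00401000) == 0x00401007);
        assert(eip_cur(&d, 0x7ffff000) == 0x7ffff002);
        assert(eip_next(&d, 0x7ffff000) == 0x7ffff007);
        return 0;
    }

A plain tcg_gen_movi_tl of the absolute EIP, as the non-PCREL branches still do, would pin the generated code to a single virtual address and defeat the point of CF_PCREL.
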
@@ -2319,7 +2319,7 @@ static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
ea = cpu_regs[a.base];
}
if (!ea) {
- if (TARGET_TB_PCREL && a.base == -2) {
+ if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
/* With cpu_eip ~= pc_save, the expression is pc-relative. */
tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
} else {
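
a.base == -2 is the decoder's marker for a RIP-relative operand, and by this point a.disp already holds the absolute translation-time target (which is why the non-PCREL path can simply move a.disp into A0). The PCREL branch applies the same delta trick to that address; a tiny worked example with made-up numbers (lea_riprel, A_DISP and PC_SAVE are illustrative, not QEMU names):

    #include <assert.h>
    #include <stdint.h>

    /* Translation-time values: the operand's absolute target and the anchor. */
    #define A_DISP   0x00403020u   /* next-insn pc + disp32, as the decoder left it */
    #define PC_SAVE  0x00401005u

    /* What the emitted tcg_gen_addi_tl(A0, cpu_eip, a.disp - pc_save) computes. */
    static uint64_t lea_riprel(uint64_t cpu_eip)
    {
        return cpu_eip + (A_DISP - PC_SAVE);
    }

    int main(void)
    {
        /* Code relocated by +0x1000 at execution time: the target moves with it. */
        assert(lea_riprel(0x00401005) == 0x00403020);
        assert(lea_riprel(0x00402005) == 0x00404020);
        return 0;
    }
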
@@ -2867,7 +2867,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
if (!CODE64(s)) {
if (ot == MO_16) {
mask = 0xffff;
- if (TARGET_TB_PCREL && CODE32(s)) {
+ if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
use_goto_tb = false;
}
} else {
@@ -2879,7 +2879,7 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
gen_update_cc_op(s);
set_cc_op(s, CC_OP_DYNAMIC);
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(s->base.tb) & CF_PCREL) {
tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
/*
* If we can prove the branch does not leave the page and we have
@@ -2896,13 +2896,13 @@ static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
- if (!TARGET_TB_PCREL) {
+ if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
tcg_gen_movi_tl(cpu_eip, new_eip);
}
tcg_gen_exit_tb(s->base.tb, tb_num);
s->base.is_jmp = DISAS_NORETURN;
} else {
- if (!TARGET_TB_PCREL) {
+ if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
tcg_gen_movi_tl(cpu_eip, new_eip);
}
if (s->jmp_opt) {
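
A sketch of the decision this hunk implements for CF_PCREL: the emitted update is a bare cpu_eip += delta, so a direct goto_tb jump is only safe when no operand-size wrap mask has to be applied afterwards and the target provably stays on the same page (QEMU's is_same_page() compares against the TB's first page; the sketch below simplifies to the current instruction's page). Function and constant names here are illustrative, not QEMU's:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_MASK ((uint64_t)-4096)   /* assume 4 KiB pages */

    enum { MO_16, MO_32, MO_64 };

    /*
     * Direct-jump eligibility for a relative branch under CF_PCREL:
     * no wrap mask needed (data16 branch in code32) and same page.
     */
    static bool pcrel_can_use_goto_tb(uint64_t pc, int diff, int ot,
                                      bool code32, bool code64)
    {
        uint64_t new_pc = pc + diff;

        if (!code64 && ot == MO_16 && code32) {
            return false;                      /* wrap mask would be required */
        }
        return (new_pc & TARGET_PAGE_MASK) == (pc & TARGET_PAGE_MASK);
    }

    int main(void)
    {
        /* Short forward jump on the same page: direct jump is fine. */
        assert(pcrel_can_use_goto_tb(0x401010, 0x20, MO_32, true, false));
        /* Crosses into the next page: fall back to an indirect exit. */
        assert(!pcrel_can_use_goto_tb(0x401ff0, 0x20, MO_32, true, false));
        /* 16-bit operand size in 32-bit code: wrap mask needed, no goto_tb. */
        assert(!pcrel_can_use_goto_tb(0x401010, 0x20, MO_16, true, false));
        return 0;
    }

When the check fails, the fallback path masks cpu_eip explicitly and leaves the TB through an indirect jump (gen_jr) or a full exit (gen_eob), just as it did with the compile-time TARGET_TB_PCREL check.
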
@@ -7065,7 +7065,7 @@ static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
target_ulong pc_arg = dc->base.pc_next;
dc->prev_insn_end = tcg_last_op();
- if (TARGET_TB_PCREL) {
+ if (tb_cflags(dcbase->tb) & CF_PCREL) {
pc_arg -= dc->cs_base;
pc_arg &= ~TARGET_PAGE_MASK;
}