@@ -674,19 +674,23 @@ static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
}
}
-static bool gen_check_io(DisasContext *s, MemOp ot, uint32_t svm_flags)
+/*
+ * Validate that access to [port, port + 1<<ot) is allowed.
+ * Raise #GP, or VMM exit if not.
+ */
+static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
+ uint32_t svm_flags)
{
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
switch (ot) {
case MO_8:
- gen_helper_check_iob(cpu_env, s->tmp2_i32);
+ gen_helper_check_iob(cpu_env, port);
break;
case MO_16:
- gen_helper_check_iow(cpu_env, s->tmp2_i32);
+ gen_helper_check_iow(cpu_env, port);
break;
case MO_32:
- gen_helper_check_iol(cpu_env, s->tmp2_i32);
+ gen_helper_check_iol(cpu_env, port);
break;
default:
tcg_abort();
}
@@ -702,7 +706,7 @@ static bool gen_check_io(DisasContext *s, MemOp ot, uint32_t svm_flags)
svm_flags |= SVM_IOIO_REP_MASK;
}
svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
- gen_helper_svm_check_io(cpu_env, s->tmp2_i32,
+ gen_helper_svm_check_io(cpu_env, port,
tcg_constant_i32(svm_flags),
tcg_constant_i32(next_eip - cur_eip));
}
@@ -6479,8 +6483,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x6c: /* insS */
case 0x6d:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- if (!gen_check_io(s, ot, SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32,
+ SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
break;
}
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
@@ -6499,8 +6505,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0x6e: /* outsS */
case 0x6f:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- if (!gen_check_io(s, ot, SVM_IOIO_STR_MASK)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
break;
}
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
@@ -6524,14 +6531,13 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xe5:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
- tcg_gen_movi_tl(s->T0, val);
- if (!gen_check_io(s, ot, SVM_IOIO_TYPE_MASK)) {
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
break;
}
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_movi_i32(s->tmp2_i32, val);
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6543,16 +6549,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xe7:
ot = mo_b_d32(b, dflag);
val = x86_ldub_code(env, s);
- tcg_gen_movi_tl(s->T0, val);
- if (!gen_check_io(s, ot, 0)) {
+ tcg_gen_movi_i32(s->tmp2_i32, val);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
break;
}
- gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
-
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_movi_i32(s->tmp2_i32, val);
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6563,14 +6567,14 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xec:
case 0xed:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- if (!gen_check_io(s, ot, SVM_IOIO_TYPE_MASK)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
break;
}
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
gen_helper_in_func(ot, s->T1, s->tmp2_i32);
gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
gen_bpt_io(s, s->tmp2_i32, ot);
@@ -6581,16 +6585,15 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
case 0xee:
case 0xef:
ot = mo_b_d32(b, dflag);
- tcg_gen_ext16u_tl(s->T0, cpu_regs[R_EDX]);
- if (!gen_check_io(s, ot, 0)) {
+ tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
+ if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
break;
}
- gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
-
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
gen_io_start();
}
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
+ gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
gen_bpt_io(s, s->tmp2_i32, ot);