@@ -1,4 +1,5 @@
#ifdef CONFIG_PLUGIN
-DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
+DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_wg, TCG_CALL_NO_WG | TCG_CALL_PLUGIN, void, i32, ptr)
+DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb_no_rwg, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
#endif
@@ -73,6 +73,7 @@ enum plugin_dyn_cb_type {
enum plugin_dyn_cb_subtype {
PLUGIN_CB_REGULAR,
+ PLUGIN_CB_REGULAR_R,
PLUGIN_CB_INLINE,
PLUGIN_N_CB_SUBTYPES,
};
@@ -79,6 +79,7 @@ enum plugin_gen_from {
enum plugin_gen_cb {
PLUGIN_GEN_CB_UDATA,
+ PLUGIN_GEN_CB_UDATA_R,
PLUGIN_GEN_CB_INLINE,
PLUGIN_GEN_CB_MEM,
PLUGIN_GEN_ENABLE_MEM_HELPER,
@@ -90,7 +91,10 @@ enum plugin_gen_cb {
* These helpers are stubs that get dynamically switched out for calls
* direct to the plugin if they are subscribed to.
*/
-void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata)
+void HELPER(plugin_vcpu_udata_cb_no_wg)(uint32_t cpu_index, void *udata)
+{ }
+
+void HELPER(plugin_vcpu_udata_cb_no_rwg)(uint32_t cpu_index, void *udata)
 { }

 void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
@@ -98,7 +102,7 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
 { }

-static void gen_empty_udata_cb(void)
+static void gen_empty_udata_cb(void (*gen_helper)(TCGv_i32, TCGv_ptr))
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
TCGv_ptr udata = tcg_temp_ebb_new_ptr();
@@ -106,12 +110,22 @@ static void gen_empty_udata_cb(void)
tcg_gen_movi_ptr(udata, 0);
tcg_gen_ld_i32(cpu_index, tcg_env,
-offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
- gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);
+ gen_helper(cpu_index, udata);
tcg_temp_free_ptr(udata);
tcg_temp_free_i32(cpu_index);
 }

+static void gen_empty_udata_cb_no_wg(void)
+{
+ gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_wg);
+}
+
+static void gen_empty_udata_cb_no_rwg(void)
+{
+ gen_empty_udata_cb(gen_helper_plugin_vcpu_udata_cb_no_rwg);
+}
+
/*
* For now we only support addi_i64.
 * When we support more ops, we can generate one empty inline cb for each.
 */
@@ -192,7 +206,8 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
gen_empty_mem_helper);
/* fall through */
case PLUGIN_GEN_FROM_TB:
- gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
+ gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb_no_rwg);
+ gen_wrapped(from, PLUGIN_GEN_CB_UDATA_R, gen_empty_udata_cb_no_wg);
gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
break;
default:
@@ -588,6 +603,12 @@ static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op);
 }

+static void plugin_gen_tb_udata_r(const struct qemu_plugin_tb *ptb,
+ TCGOp *begin_op)
+{
+ inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR_R], begin_op);
+}
+
static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op)
{
@@ -602,6 +623,14 @@ static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb,
inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op);
 }

+static void plugin_gen_insn_udata_r(const struct qemu_plugin_tb *ptb,
+ TCGOp *begin_op, int insn_idx)
+{
+ struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx);
+
+ inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR_R], begin_op);
+}
+
static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb,
TCGOp *begin_op, int insn_idx)
{
@@ -721,6 +750,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_tb_udata(plugin_tb, op);
break;
+ case PLUGIN_GEN_CB_UDATA_R:
+ plugin_gen_tb_udata_r(plugin_tb, op);
+ break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_tb_inline(plugin_tb, op);
break;
@@ -737,6 +769,9 @@ static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
case PLUGIN_GEN_CB_UDATA:
plugin_gen_insn_udata(plugin_tb, op, insn_idx);
break;
+ case PLUGIN_GEN_CB_UDATA_R:
+ plugin_gen_insn_udata_r(plugin_tb, op, insn_idx);
+ break;
case PLUGIN_GEN_CB_INLINE:
plugin_gen_insn_inline(plugin_tb, op, insn_idx);
break;
@@ -89,7 +89,11 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
void *udata)
{
if (!tb->mem_only) {
- plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR],
+ int index = flags == QEMU_PLUGIN_CB_R_REGS ||
+ flags == QEMU_PLUGIN_CB_RW_REGS ?
+ PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
+
+ plugin_register_dyn_cb__udata(&tb->cbs[index],
cb, flags, udata);
}
}
@@ -109,7 +113,11 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
void *udata)
{
if (!insn->mem_only) {
- plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR],
+ int index = flags == QEMU_PLUGIN_CB_R_REGS ||
+ flags == QEMU_PLUGIN_CB_RW_REGS ?
+ PLUGIN_CB_REGULAR_R : PLUGIN_CB_REGULAR;
+
+ plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][index],
cb, flags, udata);
}
}
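
For illustration only, not part of the patch: a minimal sketch of the plugin-side view, assuming the stock qemu-plugin.h API. Registering an exec callback with QEMU_PLUGIN_CB_R_REGS or QEMU_PLUGIN_CB_RW_REGS now lands it in PLUGIN_CB_REGULAR_R and it is emitted through the no_wg helper; since TCG_CALL_NO_WG only promises that globals are not written, TCG keeps guest register state synced before the call, which is what a register-reading callback needs. The plugin and its function names below are hypothetical.

/* Hypothetical example plugin, not part of this patch. */
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* Executed before each instrumented instruction. */
static void insn_exec(unsigned int cpu_index, void *udata)
{
    /*
     * Registered with QEMU_PLUGIN_CB_R_REGS, so this call goes through
     * plugin_vcpu_udata_cb_no_wg and guest register state is in sync,
     * e.g. for the register-read API added elsewhere in this series.
     */
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

        /* R_REGS/RW_REGS -> PLUGIN_CB_REGULAR_R; NO_REGS -> PLUGIN_CB_REGULAR */
        qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
                                               QEMU_PLUGIN_CB_R_REGS, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_plugin_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}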