@@ -68,7 +68,7 @@ union qemu_plugin_cb_sig {
 enum plugin_dyn_cb_type {
     PLUGIN_CB_REGULAR,
     PLUGIN_CB_MEM_REGULAR,
-    PLUGIN_CB_INLINE,
+    PLUGIN_CB_INLINE_ADD_U64,
 };
 
 /*
@@ -120,7 +120,7 @@ static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
     tcg_temp_free_i32(cpu_index);
 }
 
-static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
+static void gen_inline_add_u64_cb(struct qemu_plugin_dyn_cb *cb)
 {
     GArray *arr = cb->inline_insn.entry.score->data;
     size_t offset = cb->inline_insn.entry.offset;
@@ -165,8 +165,8 @@ static void inject_cb(struct qemu_plugin_dyn_cb *cb)
     case PLUGIN_CB_REGULAR:
         gen_udata_cb(cb);
         break;
-    case PLUGIN_CB_INLINE:
-        gen_inline_cb(cb);
+    case PLUGIN_CB_INLINE_ADD_U64:
+        gen_inline_add_u64_cb(cb);
         break;
     default:
         g_assert_not_reached();
@@ -316,6 +316,16 @@ static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
     return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
 }
 
+static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
+{
+    switch (op) {
+    case QEMU_PLUGIN_INLINE_ADD_U64:
+        return PLUGIN_CB_INLINE_ADD_U64;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 void plugin_register_inline_op_on_entry(GArray **arr,
                                         enum qemu_plugin_mem_rw rw,
                                         enum qemu_plugin_op op,
@@ -326,7 +336,7 @@ void plugin_register_inline_op_on_entry(GArray **arr,
 
     dyn_cb = plugin_get_dyn_cb(arr);
     dyn_cb->userp = NULL;
-    dyn_cb->type = PLUGIN_CB_INLINE;
+    dyn_cb->type = op_to_cb_type(op);
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.entry = entry;
    dyn_cb->inline_insn.op = op;
@@ -551,7 +561,7 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
             cb->regular.f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                                    vaddr, cb->userp);
             break;
-        case PLUGIN_CB_INLINE:
+        case PLUGIN_CB_INLINE_ADD_U64:
             exec_inline_op(cb, cpu->cpu_index);
             break;
         default: