--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -241,7 +241,6 @@ typedef struct SavedIOTLB {
struct KVMState;
struct kvm_run;
-struct hax_vcpu_state;
struct hvf_vcpu_state;
/* work queue */
@@ -309,6 +308,7 @@ struct qemu_work_item;
* @next_cpu: Next CPU sharing TB cache.
* @opaque: User data.
* @mem_io_pc: Host Program Counter at which the memory was accessed.
+ * @accel: Pointer to accelerator-specific state.
* @kvm_fd: vCPU file descriptor for KVM.
* @work_mutex: Lock to prevent multiple access to @work_list.
* @work_list: List of pending asynchronous work.
@@ -424,6 +424,7 @@ struct CPUState {
uint32_t can_do_io;
int32_t exception_index;
+ AccelCPUState *accel;
/* shared by kvm, hax and hvf */
bool vcpu_dirty;
@@ -443,8 +444,6 @@ struct CPUState {
/* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
bool prctl_unalign_sigbus;
- struct hax_vcpu_state *accel;
-
struct hvf_vcpu_state *hvf;
/* track IOMMUs whose translations we've cached in the TCG TLB */
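
The cpu.h hunks above replace the HAX-specific pointer with a single opaque @accel field that every accelerator can use for its per-vCPU context. A minimal sketch of the intended pattern, using a hypothetical "foo" accelerator (the foo_* names and the struct fields are illustrative, not part of this patch):

    /* foo accelerator source file: provides the real definition */
    struct AccelCPUState {
        int fd;        /* host-side vCPU handle */
        bool dirty;    /* guest registers out of sync with the host */
    };

    static int foo_init_vcpu(CPUState *cpu)
    {
        AccelCPUState *qcpu = g_new0(AccelCPUState, 1);

        qcpu->fd = foo_host_create_vcpu(cpu->cpu_index); /* illustrative */
        cpu->accel = qcpu;  /* generic code treats this as opaque */
        return 0;
    }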
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -21,6 +21,7 @@
* Incomplete struct types
* Please keep this list in case-insensitive alphabetical order.
*/
+typedef struct AccelCPUState AccelCPUState;
typedef struct AccelState AccelState;
typedef struct AdapterInfo AdapterInfo;
typedef struct AddressSpace AddressSpace;
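
Because typedefs.h only forward-declares struct AccelCPUState, code outside an accelerator can store and pass the pointer but never dereference it; only the translation unit that defines the struct sees its members. A small sketch of what the incomplete type allows (generic_helper is hypothetical):

    /* Anywhere in generic code: AccelCPUState is an incomplete type. */
    static void generic_helper(CPUState *cpu, AccelCPUState *ctx)
    {
        cpu->accel = ctx;   /* fine: assignment of incomplete-type pointers */
        /* ctx->fd = -1; */ /* error outside the defining accelerator */
    }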
--- a/target/i386/hax/hax-i386.h
+++ b/target/i386/hax/hax-i386.h
@@ -25,7 +25,8 @@ typedef HANDLE hax_fd;
#endif
extern struct hax_state hax_global;
-struct hax_vcpu_state {
+
+struct AccelCPUState {
hax_fd fd;
int vcpu_id;
struct hax_tunnel *tunnel;
@@ -46,7 +47,7 @@ struct hax_vm {
hax_fd fd;
int id;
int numvcpus;
- struct hax_vcpu_state **vcpus;
+ AccelCPUState **vcpus;
};
/* Functions exported to host specific mode */
@@ -57,7 +58,7 @@ int valid_hax_tunnel_size(uint16_t size);
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
int hax_inject_interrupt(CPUArchState *env, int vector);
struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus);
-int hax_vcpu_run(struct hax_vcpu_state *vcpu);
+int hax_vcpu_run(AccelCPUState *vcpu);
int hax_vcpu_create(int id);
void hax_kick_vcpu_thread(CPUState *cpu);
@@ -76,7 +77,7 @@ int hax_host_create_vm(struct hax_state *hax, int *vm_id);
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id);
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid);
hax_fd hax_host_open_vcpu(int vmid, int vcpuid);
-int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu);
+int hax_host_setup_vcpu_channel(AccelCPUState *vcpu);
hax_fd hax_mod_open(void);
void hax_memory_init(void);
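
Taken together, these declarations spell out the per-vCPU bring-up order: create the host vCPU inside the VM, open a handle to it, map its tunnel (the page shared with the HAXM driver), then run it. A condensed sketch of the sequence (error handling elided; the loop condition is illustrative):

    AccelCPUState *vcpu = g_new0(AccelCPUState, 1);

    hax_host_create_vcpu(hax_global.vm->fd, id);
    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
    hax_host_setup_vcpu_channel(vcpu);   /* fills in vcpu->tunnel */

    do {
        hax_vcpu_run(vcpu);              /* enter the guest until next exit */
        /* ... decode the exit reason from vcpu->tunnel ... */
    } while (0 /* until the exit must be handled in QEMU */);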
--- a/target/i386/hax/hax-all.c
+++ b/target/i386/hax/hax-all.c
@@ -62,7 +62,7 @@ int valid_hax_tunnel_size(uint16_t size)
hax_fd hax_vcpu_get_fd(CPUArchState *env)
{
- struct hax_vcpu_state *vcpu = env_cpu(env)->accel;
+ AccelCPUState *vcpu = env_cpu(env)->accel;
if (!vcpu) {
return HAX_INVALID_FD;
}
@@ -136,7 +136,7 @@ static int hax_version_support(struct hax_state *hax)
int hax_vcpu_create(int id)
{
- struct hax_vcpu_state *vcpu = NULL;
+ AccelCPUState *vcpu = NULL;
int ret;
if (!hax_global.vm) {
@@ -149,7 +149,7 @@ int hax_vcpu_create(int id)
return 0;
}
- vcpu = g_new0(struct hax_vcpu_state, 1);
+ vcpu = g_new0(AccelCPUState, 1);
ret = hax_host_create_vcpu(hax_global.vm->fd, id);
if (ret) {
@@ -188,7 +188,7 @@ int hax_vcpu_create(int id)
int hax_vcpu_destroy(CPUState *cpu)
{
- struct hax_vcpu_state *vcpu = cpu->accel;
+ AccelCPUState *vcpu = cpu->accel;
if (!hax_global.vm) {
fprintf(stderr, "vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
@@ -263,7 +263,7 @@ struct hax_vm *hax_vm_create(struct hax_state *hax, int max_cpus)
}
vm->numvcpus = max_cpus;
- vm->vcpus = g_new0(struct hax_vcpu_state *, vm->numvcpus);
+ vm->vcpus = g_new0(AccelCPUState *, vm->numvcpus);
for (i = 0; i < vm->numvcpus; i++) {
vm->vcpus[i] = NULL;
}
@@ -415,7 +415,7 @@ static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port,
static int hax_vcpu_interrupt(CPUArchState *env)
{
CPUState *cpu = env_cpu(env);
- struct hax_vcpu_state *vcpu = cpu->accel;
+ AccelCPUState *vcpu = cpu->accel;
struct hax_tunnel *ht = vcpu->tunnel;
/*
@@ -447,7 +447,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
void hax_raise_event(CPUState *cpu)
{
- struct hax_vcpu_state *vcpu = cpu->accel;
+ AccelCPUState *vcpu = cpu->accel;
if (!vcpu) {
return;
@@ -468,7 +468,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
int ret = 0;
CPUState *cpu = env_cpu(env);
X86CPU *x86_cpu = X86_CPU(cpu);
- struct hax_vcpu_state *vcpu = cpu->accel;
+ AccelCPUState *vcpu = cpu->accel;
struct hax_tunnel *ht = vcpu->tunnel;
if (!hax_enabled()) {
--- a/target/i386/hax/hax-posix.c
+++ b/target/i386/hax/hax-posix.c
@@ -205,7 +205,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
return fd;
}
-int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+int hax_host_setup_vcpu_channel(AccelCPUState *vcpu)
{
int ret;
struct hax_tunnel_info info;
@@ -227,7 +227,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
return 0;
}
-int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+int hax_vcpu_run(AccelCPUState *vcpu)
{
return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
}
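
On POSIX hosts, running a vCPU is a single ioctl(2) on its file descriptor, as above. The Windows variant that follows issues the equivalent request on a HANDLE (hax_fd is typedef'd to HANDLE there), presumably through DeviceIoControl.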
--- a/target/i386/hax/hax-windows.c
+++ b/target/i386/hax/hax-windows.c
@@ -301,7 +301,7 @@ hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
return hDeviceVCPU;
}
-int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+int hax_host_setup_vcpu_channel(AccelCPUState *vcpu)
{
hax_fd hDeviceVCPU = vcpu->fd;
int ret;
@@ -327,7 +327,7 @@ int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
return 0;
}
-int hax_vcpu_run(struct hax_vcpu_state *vcpu)
+int hax_vcpu_run(AccelCPUState *vcpu)
{
int ret;
HANDLE hDeviceVCPU = vcpu->fd;
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -995,7 +995,7 @@ nvmm_init_vcpu(CPUState *cpu)
}
cpu->vcpu_dirty = true;
- cpu->accel = (struct hax_vcpu_state *)qcpu;
+    cpu->accel = (AccelCPUState *)qcpu;
return 0;
}
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -2258,7 +2258,7 @@ int whpx_init_vcpu(CPUState *cpu)
vcpu->interruptable = true;
cpu->vcpu_dirty = true;
- cpu->accel = (struct hax_vcpu_state *)vcpu;
+    cpu->accel = (AccelCPUState *)vcpu;
max_vcpu_index = max(max_vcpu_index, cpu->cpu_index);
qemu_add_vm_change_state_handler(whpx_cpu_update_state, cpu->env_ptr);
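
The NVMM and WHPX hunks drop the old laundering through struct hax_vcpu_state *, which existed only to satisfy the field's previous type. The remaining (AccelCPUState *) casts keep the assignments warning-free as long as those accelerators still declare their contexts with their own private struct types; they become redundant once those structs are themselves renamed to AccelCPUState.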