@@ -22,12 +22,11 @@
* non-NULL value of 'tb'. Strictly speaking pc is only needed for
* CF_PCREL, but it's used always for simplicity.
*/
-struct CPUJumpCache {
- struct rcu_head rcu;
+typedef struct CPUJumpCache {
struct {
TranslationBlock *tb;
vaddr pc;
} array[TB_JMP_CACHE_SIZE];
-};
+} CPUJumpCache;
#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
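For reference, the cache above is a plain direct-mapped table: each guest pc hashes to exactly one slot, a hit requires both a non-NULL tb and a matching pc, and a miss falls through to the full lookup. The sketch below is a minimal standalone model, not QEMU code: JC_SIZE and jc_hash are simplified stand-ins for TB_JMP_CACHE_SIZE and tb_jmp_cache_hash_func, and the plain loads/stores stand in for the qatomic_read()/qatomic_set() accesses the real code uses on the tb slot.

/* Standalone sketch of the direct-mapped jump cache (illustrative only). */
#include <stdint.h>
#include <stdio.h>

#define JC_SIZE 256                    /* stand-in for TB_JMP_CACHE_SIZE */

typedef uint64_t vaddr;                /* stand-in for QEMU's vaddr */
typedef struct TranslationBlock { vaddr pc; } TranslationBlock;

typedef struct CPUJumpCache {
    struct {
        TranslationBlock *tb;
        vaddr pc;
    } array[JC_SIZE];
} CPUJumpCache;

/* Simplified stand-in for tb_jmp_cache_hash_func(); assumes JC_SIZE is a
 * power of two. */
static inline uint32_t jc_hash(vaddr pc)
{
    return (uint32_t)(pc >> 2) & (JC_SIZE - 1);
}

/* Fast path: a hit needs a non-NULL tb *and* a matching pc in the slot. */
static TranslationBlock *jc_lookup(CPUJumpCache *jc, vaddr pc)
{
    uint32_t h = jc_hash(pc);
    TranslationBlock *tb = jc->array[h].tb;

    return (tb && jc->array[h].pc == pc) ? tb : NULL;
}

/* Miss path: after the full lookup, refill the slot for next time. */
static void jc_insert(CPUJumpCache *jc, TranslationBlock *tb)
{
    uint32_t h = jc_hash(tb->pc);

    jc->array[h].pc = tb->pc;
    jc->array[h].tb = tb;
}

int main(void)
{
    static CPUJumpCache jc;
    TranslationBlock tb = { .pc = 0x40001000 };

    jc_insert(&jc, &tb);
    printf("hit:  %d\n", jc_lookup(&jc, 0x40001000) != NULL); /* 1 */
    printf("miss: %d\n", jc_lookup(&jc, 0x40002000) != NULL); /* 0 */
    return 0;
}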
@@ -7,6 +7,7 @@
#define ACCEL_TCG_VCPU_STATE_H
#include "hw/core/cpu.h"
+#include "tb-jmp-cache.h"
/**
* AccelCPUState: vCPU fields specific to TCG accelerator
@@ -16,6 +17,8 @@ struct AccelCPUState {
uint32_t cflags_next_tb;
sigjmp_buf jmp_env;
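+ /* Per-vCPU jump cache for the fast TB lookup path. */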
+ CPUJumpCache tb_jmp_cache;
#ifdef CONFIG_USER_ONLY
TaskState *ts;
@@ -483,8 +483,6 @@ struct CPUState {
AddressSpace *as;
MemoryRegion *memory;
- CPUJumpCache *tb_jmp_cache;
-
GArray *gdb_regs;
int gdb_num_regs;
int gdb_num_g_regs;
@@ -44,7 +44,6 @@ typedef struct CPUAddressSpace CPUAddressSpace;
typedef struct CPUArchState CPUArchState;
typedef struct CPUPluginState CPUPluginState;
typedef struct CpuInfoFast CpuInfoFast;
-typedef struct CPUJumpCache CPUJumpCache;
typedef struct CPUState CPUState;
typedef struct CPUTLBEntryFull CPUTLBEntryFull;
typedef struct DeviceListener DeviceListener;
@@ -261,7 +261,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
tcg_debug_assert(!(cflags & CF_INVALID));
hash = tb_jmp_cache_hash_func(pc);
- jc = cpu->tb_jmp_cache;
+ jc = &cpu->accel->tb_jmp_cache;
tb = qatomic_read(&jc->array[hash].tb);
if (likely(tb &&
@@ -1004,7 +1004,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
* for the fast lookup
*/
h = tb_jmp_cache_hash_func(pc);
- jc = cpu->tb_jmp_cache;
+ jc = &cpu->accel->tb_jmp_cache;
jc->array[h].pc = pc;
qatomic_set(&jc->array[h].tb, tb);
}
@@ -1083,7 +1083,6 @@ bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
tcg_target_initialized = true;
}
- cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
tcg_iommu_init_notifier_list(cpu);
@@ -1101,5 +1100,4 @@ void tcg_exec_unrealizefn(CPUState *cpu)
#endif /* !CONFIG_USER_ONLY */
tlb_destroy(cpu);
- g_free_rcu(cpu->tb_jmp_cache, rcu);
}
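With the cache embedded in AccelCPUState there is nothing left to free in tcg_exec_unrealizefn(): the storage now goes away together with the accelerator state, and handing the allocator the address of an embedded member would corrupt the heap. A minimal standalone sketch of that constraint (the Accel/Cache names are illustrative, not QEMU types):

#include <stdlib.h>

struct Cache { int slots[16]; };
struct Accel { struct Cache cache; };  /* embedded, not separately allocated */

int main(void)
{
    struct Accel *a = malloc(sizeof(*a));

    /* free(&a->cache);  <- invalid: interior pointer, heap corruption */
    free(a);             /* the embedded cache is released with its owner */
    return 0;
}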
@@ -156,7 +156,8 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
{
- CPUJumpCache *jc = cpu->tb_jmp_cache;
+ CPUJumpCache *jc;
int i, i0;
- if (unlikely(!jc)) {
+ if (unlikely(cpu->accel == NULL)) {
return;
}
+ jc = &cpu->accel->tb_jmp_cache;
@@ -888,7 +888,7 @@ static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
uint32_t h = tb_jmp_cache_hash_func(tb->pc);
CPU_FOREACH(cpu) {
- CPUJumpCache *jc = cpu->tb_jmp_cache;
+ CPUJumpCache *jc = &cpu->accel->tb_jmp_cache;
if (qatomic_read(&jc->array[h].tb) == tb) {
qatomic_set(&jc->array[h].tb, NULL);
@@ -652,13 +652,14 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
*/
void tcg_flush_jmp_cache(CPUState *cpu)
{
- CPUJumpCache *jc = cpu->tb_jmp_cache;
+ CPUJumpCache *jc;
- /* During early initialization, the cache may not yet be allocated. */
- if (unlikely(jc == NULL)) {
+ /* During early initialization, the accel state may not exist yet. */
+ if (unlikely(cpu->accel == NULL)) {
return;
}
+ jc = &cpu->accel->tb_jmp_cache;
for (int i = 0; i < TB_JMP_CACHE_SIZE; i++) {
qatomic_set(&jc->array[i].tb, NULL);
}
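Both tcg_flush_jmp_cache() and tb_jmp_cache_clear_page() test cpu->accel before forming the member address, because evaluating &cpu->accel->tb_jmp_cache with a NULL accel is undefined behavior even if the result is never dereferenced. A standalone sketch of that pattern (struct and function names are illustrative):

#include <stdio.h>

struct Cache { int n; };
struct Accel { struct Cache cache; };
struct CPU   { struct Accel *accel; };

static void flush(struct CPU *cpu)
{
    struct Cache *c;

    /* Check the container first: &cpu->accel->cache with accel == NULL
     * would already be undefined behavior. */
    if (cpu->accel == NULL) {
        return;
    }
    c = &cpu->accel->cache;
    c->n = 0;
}

int main(void)
{
    struct Accel a = { .cache = { .n = 5 } };
    struct CPU cpu = { .accel = &a };

    flush(&cpu);
    printf("%d\n", a.cache.n); /* 0 */

    cpu.accel = NULL;
    flush(&cpu);               /* early return, no UB */
    return 0;
}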