@@ -2209,7 +2209,7 @@ static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 7;
- uint64_t x = load_atomic8_or_exit(cpu_env(cpu), ra, p->haddr - o);
+ uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);

x = cpu_to_be64(x);
x <<= o * 8;
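For orientation: since the CPUState/CPUArchState layout unification, the per-target architectural state sits directly behind the generic CPUState inside each target's ArchCPU, so converting between the two is a constant pointer offset in either direction. A minimal sketch of that relationship, modeled on QEMU's headers and not part of this patch:

    /* ArchCPU embeds the generic state first, then the arch state. */
    struct ArchCPU {
        CPUState parent_obj;    /* generic, target-agnostic */
        CPUArchState env;       /* target-specific registers etc. */
    };

    /* Both directions are plain pointer arithmetic, not lookups. */
    static inline CPUArchState *cpu_env(CPUState *cpu)
    {
        return (CPUArchState *)(cpu + 1);
    }

    static inline CPUState *env_cpu(CPUArchState *env)
    {
        return (CPUState *)((char *)env - sizeof(CPUState));
    }

The cputlb hunks are therefore mechanical: callers that already hold a CPUState stop round-tripping through cpu_env() only for the callee to convert straight back.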
@@ -2229,7 +2229,7 @@ static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 15;
- Int128 x, y = load_atomic16_or_exit(cpu_env(cpu), ra, p->haddr - o);
+ Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
int size = p->size;

if (!HOST_BIG_ENDIAN) {
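Both do_ld_whole_be* helpers implement the same trick for the atomic cross-page case, sketched here in comment form (the final merge step is elided in this excerpt):

    /* o = misalignment within the aligned word, size = bytes wanted:
     * 1. atomically load the whole aligned 8 (or 16) bytes
     *    containing p->haddr;
     * 2. cpu_to_be64() so the first guest byte is most significant
     *    regardless of host endianness;
     * 3. shift left by o * 8 to discard the bytes preceding the
     *    wanted ones;
     * 4. (elided) keep the top 'size' bytes and merge them into the
     *    big-endian accumulator ret_be. */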
@@ -2373,7 +2373,7 @@ static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
}
} else {
/* Perform the load host endian, then swap if necessary. */
- ret = load_atom_2(cpu_env(cpu), ra, p->haddr, memop);
+ ret = load_atom_2(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap16(ret);
}
@@ -2394,7 +2394,7 @@ static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
}
} else {
/* Perform the load host endian. */
- ret = load_atom_4(cpu_env(cpu), ra, p->haddr, memop);
+ ret = load_atom_4(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap32(ret);
}
@@ -2415,7 +2415,7 @@ static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
}
} else {
/* Perform the load host endian. */
- ret = load_atom_8(cpu_env(cpu), ra, p->haddr, memop);
+ ret = load_atom_8(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap64(ret);
}
@@ -2578,7 +2578,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
}
} else {
/* Perform the load host endian. */
- ret = load_atom_16(cpu_env(cpu), ra, l.page[0].haddr, l.memop);
+ ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
if (l.memop & MO_BSWAP) {
ret = bswap128(ret);
}
@@ -2893,7 +2893,7 @@ static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
if (memop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(cpu_env(cpu), ra, p->haddr, memop, val);
+ store_atom_2(cpu, ra, p->haddr, memop, val);
}
}
@@ -2913,7 +2913,7 @@ static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
if (memop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(cpu_env(cpu), ra, p->haddr, memop, val);
+ store_atom_4(cpu, ra, p->haddr, memop, val);
}
}
@@ -2933,7 +2933,7 @@ static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
if (memop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(cpu_env(cpu), ra, p->haddr, memop, val);
+ store_atom_8(cpu, ra, p->haddr, memop, val);
}
}
@@ -3064,7 +3064,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
if (l.memop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(cpu_env(cpu), ra, l.page[0].haddr, l.memop, val);
+ store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
}
return;
}
@@ -1002,7 +1002,7 @@ static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
tcg_debug_assert((mop & MO_SIZE) == MO_16);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_2(env, ra, haddr, mop);
+ ret = load_atom_2(env_cpu(env), ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1040,7 +1040,7 @@ static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
tcg_debug_assert((mop & MO_SIZE) == MO_32);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_4(env, ra, haddr, mop);
+ ret = load_atom_4(env_cpu(env), ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1078,7 +1078,7 @@ static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
tcg_debug_assert((mop & MO_SIZE) == MO_64);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_8(env, ra, haddr, mop);
+ ret = load_atom_8(env_cpu(env), ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1110,7 +1110,7 @@ static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
tcg_debug_assert((mop & MO_SIZE) == MO_128);
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
- ret = load_atom_16(env, ra, haddr, mop);
+ ret = load_atom_16(env_cpu(env), ra, haddr, mop);
clear_helper_retaddr();

if (mop & MO_BSWAP) {
@@ -1175,7 +1175,7 @@ static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
if (mop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(env, ra, haddr, mop, val);
+ store_atom_2(env_cpu(env), ra, haddr, mop, val);
clear_helper_retaddr();
}
@@ -1204,7 +1204,7 @@ static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
if (mop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(env, ra, haddr, mop, val);
+ store_atom_4(env_cpu(env), ra, haddr, mop, val);
clear_helper_retaddr();
}
@@ -1233,7 +1233,7 @@ static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
if (mop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(env, ra, haddr, mop, val);
+ store_atom_8(env_cpu(env), ra, haddr, mop, val);
clear_helper_retaddr();
}
@@ -1262,7 +1262,7 @@ static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
if (mop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(env, ra, haddr, mop, val);
+ store_atom_16(env_cpu(env), ra, haddr, mop, val);
clear_helper_retaddr();
}
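On the user-only side the helpers keep their CPUArchState signature, since they are instantiated per target, so the conversion now happens once at each call site via env_cpu(). Note also that every clear_helper_retaddr() pairs with a set_helper_retaddr() inside cpu_mmu_lookup(): while set, a host SIGSEGV during the access is attributed to the guest instruction at ra. The shared shape of these helpers, reassembled for readability (an illustrative sketch with a hypothetical name, not a hunk from the patch):

    static uint16_t do_ld2_mmu_shape(CPUArchState *env, abi_ptr addr,
                                     MemOp mop, uintptr_t ra)
    {
        void *haddr;
        uint16_t ret;

        cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);   /* guest ordering */
        haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
        ret = load_atom_2(env_cpu(env), ra, haddr, mop);
        clear_helper_retaddr();                    /* fault window ends */

        if (mop & MO_BSWAP) {
            ret = bswap16(ret);                    /* guest endianness */
        }
        return ret;
    }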
@@ -26,7 +26,7 @@
* If the operation must be split into two operations to be
* examined separately for atomicity, return -lg2.
*/
-static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
+static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
{
MemOp atom = memop & MO_ATOM_MASK;
MemOp size = memop & MO_SIZE;
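The sign convention is worth spelling out: a non-negative result is the log2 of the size that must be accessed in one single-copy-atomic operation, while a negative result -lg2 permits splitting the access into two halves, each of which must itself be atomic at 2^lg2 bytes. Interpreting the result at a call site (sketch; the MemOp constants MO_8..MO_128 are log2 sizes 0..4):

    int atmax = required_atomicity(cpu, pi, memop);

    if (atmax >= 0) {
        /* e.g. MO_32: all 4 bytes in one atomic access */
    } else {
        /* e.g. -MO_64 for a 16-byte MO_ATOM_*_PAIR access: two
         * 8-byte operations are fine, but each half must itself
         * be single-copy atomic */
    }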
@@ -93,7 +93,7 @@ static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
* host atomicity in order to avoid racing. This reduction
* avoids looping with cpu_loop_exit_atomic.
*/
- if (cpu_in_serial_context(env_cpu(env))) {
+ if (cpu_in_serial_context(cpu)) {
return MO_8;
}
return atmax;
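cpu_in_serial_context() is what makes that reduction safe: if no other vCPU can observe the access concurrently, no host atomicity is needed at all, so returning MO_8 guarantees the helpers below never reach cpu_loop_exit_atomic() and loop. A sketch of the predicate, modeled on accel/tcg internals (an assumption about the current definition, not part of this patch):

    static inline bool cpu_in_serial_context(CPUState *cs)
    {
        /* Either the TB was translated without CF_PARALLEL (only
         * one vCPU can run), or we already hold the exclusive
         * section taken for an EXCP_ATOMIC replay. */
        return !(cs->tcg_cflags & CF_PARALLEL)
            || cpu_in_exclusive_context(cs);
    }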
@@ -139,14 +139,14 @@ static inline uint64_t load_atomic8(void *pv)

/**
* load_atomic8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
*
* Atomically load 8 aligned bytes from @pv.
* If this is not possible, longjmp out to restart serially.
*/
-static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
{
if (HAVE_al8) {
return load_atomic8(pv);
@@ -168,19 +168,19 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
#endif

/* Ultimate fallback: re-execute in serial context. */
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}

/**
* load_atomic16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
*
* Atomically load 16 aligned bytes from @pv.
* If this is not possible, longjmp out to restart serially.
*/
-static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
{
Int128 *p = __builtin_assume_aligned(pv, 16);
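cpu_loop_exit_atomic() is the "_or_exit" in these names: it abandons the current translation block and asks the outer execution loop to replay the instruction inside start_exclusive(), where ordinary loads and stores are atomic by construction. Roughly, modeled on accel/tcg/cpu-exec-common.c:

    void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
    {
        /* The serial replay must make forward progress. */
        g_assert(!cpu_in_serial_context(cpu));
        cpu->exception_index = EXCP_ATOMIC;
        cpu_loop_exit_restore(cpu, pc);  /* unwind guest state, longjmp */
    }

This exit path, together with required_atomicity(), is all these helpers need the CPU for, which is why the generic CPUState suffices.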
@@ -212,7 +212,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
}

/* Ultimate fallback: re-execute in serial context. */
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}

/**
@@ -263,7 +263,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)

/**
* load_atom_extract_al8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @pv: host address
* @s: object size in bytes, @s <= 4.
@@ -273,7 +273,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
* 8-byte load and extract.
* The value is returned in the low bits of a uint32_t.
*/
-static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
void *pv, int s)
{
uintptr_t pi = (uintptr_t)pv;
@@ -281,12 +281,12 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;

pv = (void *)(pi & ~7);
- return load_atomic8_or_exit(env, ra, pv) >> shr;
+ return load_atomic8_or_exit(cpu, ra, pv) >> shr;
}

/**
* load_atom_extract_al16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
* @ra: host unwind address
* @p: host address
* @s: object size in bytes, @s <= 8.
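The shift computed in load_atom_extract_al8_or_exit() above is easiest to see with numbers. Worked example for a 2-byte value at an address ending in ...1, i.e. o = 1, s = 2, within the aligned quad covering byte offsets 0..7:

    /* Little-endian host: offset k occupies bits [8k+7:8k], so the
     * value spans bits [23:8] and shr = o * 8 = 8.
     * Big-endian host: offset k occupies bits [63-8k:56-8k], so the
     * same bytes span bits [55:40] and shr = (8 - s - o) * 8 = 40.
     * Either way the result lands host-endian in the low 16 bits: */
    return load_atomic8_or_exit(cpu, ra, pv) >> shr;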
@@ -299,7 +299,7 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
*
* If this is not possible, longjmp out to restart serially.
*/
-static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
void *pv, int s)
{
uintptr_t pi = (uintptr_t)pv;
@@ -312,7 +312,7 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
* Provoke SIGBUS if possible otherwise.
*/
pv = (void *)(pi & ~7);
- r = load_atomic16_or_exit(env, ra, pv);
+ r = load_atomic16_or_exit(cpu, ra, pv);
r = int128_urshift(r, shr);
return int128_getlo(r);
@@ -394,7 +394,7 @@ static inline uint64_t load_atom_8_by_8_or_4(void *pv)
*
* Load 2 bytes from @p, honoring the atomicity of @memop.
*/
-static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
+static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -410,7 +410,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
}
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
return lduw_he_p(pv);
@@ -421,9 +421,9 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
return load_atomic4(pv - 1) >> 8;
}
if ((pi & 15) != 7) {
- return load_atom_extract_al8_or_exit(env, ra, pv, 2);
+ return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 2);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
default:
g_assert_not_reached();
}
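The (pi & 15) != 7 test above encodes a boundary fact: a 2-byte access starting at byte 7 of a 16-byte line straddles the 8-byte boundary, so no single aligned 8-byte load covers it and only the aligned 16-byte load can. In comment form (reasoning from the shown code; offset 15 cannot reach this branch, since an access crossing the 16-byte line would already have had its required atomicity reduced):

    /* (pi & 15) == 7: bytes 7..8 straddle the 8-byte boundary,
     *   so use load_atom_extract_al16_or_exit().
     * Any other offset with atmax == MO_16: both bytes fall inside
     *   one aligned 8-byte quad, so the al8 variant suffices. */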
@@ -436,7 +436,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
*
* Load 4 bytes from @p, honoring the atomicity of @memop.
*/
-static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -452,7 +452,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
}
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
case MO_16:
@@ -466,9 +466,9 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
return load_atom_extract_al4x2(pv);
case MO_32:
if (!(pi & 4)) {
- return load_atom_extract_al8_or_exit(env, ra, pv, 4);
+ return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 4);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
default:
g_assert_not_reached();
}
@@ -481,7 +481,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
*
* Load 8 bytes from @p, honoring the atomicity of @memop.
*/
-static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -498,12 +498,12 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
return load_atom_extract_al16_or_al8(pv, 8);
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
if (atmax == MO_64) {
if (!HAVE_al8 && (pi & 7) == 0) {
- load_atomic8_or_exit(env, ra, pv);
+ load_atomic8_or_exit(cpu, ra, pv);
}
- return load_atom_extract_al16_or_exit(env, ra, pv, 8);
+ return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
}
if (HAVE_al8_fast) {
return load_atom_extract_al8x2(pv);
@@ -519,7 +519,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
if (HAVE_al8) {
return load_atom_extract_al8x2(pv);
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
}
@@ -532,7 +532,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
*
* Load 16 bytes from @p, honoring the atomicity of @memop.
*/
-static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
+static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop)
{
uintptr_t pi = (uintptr_t)pv;
@@ -548,7 +548,7 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
return atomic16_read_ro(pv);
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
memcpy(&r, pv, 16);
@@ -563,20 +563,20 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
break;
case MO_64:
if (!HAVE_al8) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
a = load_atomic8(pv);
b = load_atomic8(pv + 8);
break;
case -MO_64:
if (!HAVE_al8) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
a = load_atom_extract_al8x2(pv);
b = load_atom_extract_al8x2(pv + 8);
break;
case MO_128:
- return load_atomic16_or_exit(env, ra, pv);
+ return load_atomic16_or_exit(cpu, ra, pv);
default:
g_assert_not_reached();
}
@@ -857,7 +857,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
*
* Store 2 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_2(CPUArchState *env, uintptr_t ra,
+static void store_atom_2(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint16_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -868,7 +868,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
return;
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
if (atmax == MO_8) {
stw_he_p(pv, val);
return;
@@ -897,7 +897,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
g_assert_not_reached();
}

- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}

/**
@@ -908,7 +908,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
*
* Store 4 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_4(CPUArchState *env, uintptr_t ra,
+static void store_atom_4(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint32_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -919,7 +919,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
return;
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
stl_he_p(pv, val);
@@ -961,7 +961,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
return;
}
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
default:
g_assert_not_reached();
}
@@ -975,7 +975,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
*
* Store 8 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_8(CPUArchState *env, uintptr_t ra,
+static void store_atom_8(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, uint64_t val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -986,7 +986,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
return;
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
stq_he_p(pv, val);
@@ -1029,7 +1029,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
default:
g_assert_not_reached();
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}

/**
@@ -1040,7 +1040,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
*
* Store 16 bytes to @p, honoring the atomicity of @memop.
*/
-static void store_atom_16(CPUArchState *env, uintptr_t ra,
+static void store_atom_16(CPUState *cpu, uintptr_t ra,
void *pv, MemOp memop, Int128 val)
{
uintptr_t pi = (uintptr_t)pv;
@@ -1052,7 +1052,7 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
return;
}

- atmax = required_atomicity(env, pi, memop);
+ atmax = required_atomicity(cpu, pi, memop);

a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
@@ -1111,5 +1111,5 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
default:
g_assert_not_reached();
}
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
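Taken together, every load_atom_* / store_atom_* helper now follows the same ladder, and the threaded-through CPUState exists for the bottom rung. A summary sketch of this excerpt, not exhaustive:

    /* 1. A host primitive covers the access (HAVE_al8,
     *    atomic16_read_ro, ...): perform it directly.
     * 2. required_atomicity() allows something smaller: assemble or
     *    split via aligned sub-words; partial stores into an aligned
     *    word use cmpxchg-based read-modify-write.
     * 3. Nothing fits: cpu_loop_exit_atomic(cpu, ra) replays the
     *    instruction serially, where required_atomicity() collapses
     *    to MO_8 and rung 2 always succeeds. */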