@@ -74,7 +74,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
@@ -95,7 +96,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ, retaddr);
+ DATA_TYPE val;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
@@ -109,7 +112,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_WRITE, retaddr);
uint16_t info = trace_mem_build_info(SHIFT, false, 0, true,
ATOMIC_MMU_IDX);
@@ -123,7 +127,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false,
ATOMIC_MMU_IDX);
@@ -139,7 +144,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
@@ -161,7 +167,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
#undef GEN_ATOMIC_HELPER
-/* These helpers are, as a whole, full barriers. Within the helper,
+/*
+ * These helpers are, as a whole, full barriers. Within the helper,
* the leading barrier is explicit and the trailing barrier is within
* cmpxchg primitive.
*
@@ -172,7 +179,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE cmp, old, new, val = xval; \
uint16_t info = trace_mem_build_info(SHIFT, false, 0, false, \
ATOMIC_MMU_IDX); \
@@ -217,7 +225,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
DATA_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
@@ -238,7 +247,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP_R;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ, retaddr);
+ DATA_TYPE val;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
@@ -252,7 +263,8 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_W;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_WRITE, retaddr);
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, true,
ATOMIC_MMU_IDX);
@@ -268,7 +280,8 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW;
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
+ PAGE_READ | PAGE_WRITE, retaddr);
ABI_TYPE ret;
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, false,
ATOMIC_MMU_IDX);
@@ -284,7 +297,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
DATA_TYPE ret; \
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
@@ -315,7 +329,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \
{ \
- XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP_RW; \
+ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
+ PAGE_READ | PAGE_WRITE, retaddr); \
XDATA_TYPE ldo, ldn, old, new, val = xval; \
uint16_t info = trace_mem_build_info(SHIFT, false, MO_BSWAP, \
false, ATOMIC_MMU_IDX); \
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2693,12 +2693,7 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
#define ATOMIC_NAME(X) \
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
-#define ATOMIC_MMU_LOOKUP_RW \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
-#define ATOMIC_MMU_LOOKUP_R \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
-#define ATOMIC_MMU_LOOKUP_W \
- atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)
+
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1221,9 +1221,14 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
return ret;
}
-/* Do not allow unaligned operations to proceed. Return the host address. */
+/*
+ * Do not allow unaligned operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- int size, uintptr_t retaddr)
+ TCGMemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
{
/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
@@ -1243,9 +1248,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#define ATOMIC_NAME(X) \
glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
-#define ATOMIC_MMU_LOOKUP_RW atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr)
-#define ATOMIC_MMU_LOOKUP_R ATOMIC_MMU_LOOKUP_RW
-#define ATOMIC_MMU_LOOKUP_W ATOMIC_MMU_LOOKUP_RW
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
#define ATOMIC_MMU_IDX MMU_USER_IDX