@@ -212,7 +212,7 @@ G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
-#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_RWX (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/*
* Original state of the write flag (used when tracking self-modifying code)
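For context only (not part of the patch): PAGE_RWX is used as a mask that strips the tracking bits such as PAGE_VALID from a page-flag word, leaving just the permission bits that the host mprotect() understands; the hunks below rely on PAGE_READ/PAGE_WRITE/PAGE_EXEC lining up with the host PROT_READ/PROT_WRITE/PROT_EXEC values. A minimal sketch, with a hypothetical helper name:

#include <sys/mman.h>

/* Hypothetical helper (not QEMU code), assuming the PAGE_* definitions
 * from the hunk above are in scope. */
static int apply_page_flags(void *host_addr, size_t len, int page_flags)
{
    /* Keep only r/w/x; drop PAGE_VALID and the other tracking bits. */
    return mprotect(host_addr, len, page_flags & PAGE_RWX);
}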
@@ -765,7 +765,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
if (prot & PAGE_EXEC) {
prot = (prot & ~PAGE_EXEC) | PAGE_READ;
}
- mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
+ mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
}
mmap_unlock();
@@ -96,7 +96,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
end = host_end;
}
ret = mprotect(g2h_untagged(host_start),
- qemu_host_page_size, prot1 & PAGE_BITS);
+ qemu_host_page_size, prot1 & PAGE_RWX);
if (ret != 0)
goto error;
host_start += qemu_host_page_size;
@@ -107,7 +107,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot)
prot1 |= page_get_flags(addr);
}
ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
- qemu_host_page_size, prot1 & PAGE_BITS);
+ qemu_host_page_size, prot1 & PAGE_RWX);
if (ret != 0)
goto error;
host_end -= qemu_host_page_size;
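An aside on why the mprotect() calls above OR page_get_flags() across a whole host page (a sketch, not part of the patch): when the host page size is larger than the target page size, several target pages share one host page, so the host protection has to be the union of their permissions, clamped to r/w/x with PAGE_RWX. The helper and parameters below are hypothetical:

/* Compute the host protection for one host page by unioning the flags of
 * every target page it covers; get_flags stands in for page_get_flags(). */
static int host_prot_for_page(unsigned long host_start,
                              unsigned long host_page_size,
                              unsigned long target_page_size,
                              int (*get_flags)(unsigned long))
{
    int prot = 0;
    unsigned long a;

    for (a = host_start; a < host_start + host_page_size;
         a += target_page_size) {
        prot |= get_flags(a);
    }
    return prot & PAGE_RWX;
}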
@@ -174,7 +174,7 @@ static int mmap_frag(abi_ulong real_start,
return -1;
prot1 = prot;
}
- prot1 &= PAGE_BITS;
+ prot1 &= PAGE_RWX;
prot_new = prot | prot1;
if (fd != -1) {
@@ -2361,7 +2361,7 @@ static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss,
if (start_bss < align_bss) {
int flags = page_get_flags(start_bss);
- if (!(flags & PAGE_BITS)) {
+ if (!(flags & PAGE_RWX)) {
/*
* The whole address space of the executable was reserved
* at the start, therefore all pages will be VALID.
@@ -117,7 +117,7 @@ static void shm_region_rm_complete(abi_ptr start, abi_ptr last)
static int validate_prot_to_pageflags(int prot)
{
int valid = PROT_READ | PROT_WRITE | PROT_EXEC | TARGET_PROT_SEM;
- int page_flags = (prot & PAGE_BITS) | PAGE_VALID;
+ int page_flags = (prot & PAGE_RWX) | PAGE_VALID;
#ifdef TARGET_AARCH64
{
@@ -333,7 +333,7 @@ int cris_mmu_translate(struct cris_mmu_result *res,
if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
res->phy = vaddr;
- res->prot = PAGE_BITS;
+ res->prot = PAGE_RWX;
goto done;
}
@@ -344,7 +344,7 @@ int cris_mmu_translate(struct cris_mmu_result *res,
miss = 0;
base = cris_mmu_translate_seg(env, seg);
res->phy = base | (0x0fffffff & vaddr);
- res->prot = PAGE_BITS;
+ res->prot = PAGE_RWX;
} else {
miss = cris_mmu_translate_page(res, env, vaddr, access_type,
is_user, debug);
@@ -51,7 +51,7 @@ bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (mmu_idx == MMU_NOMMU_IDX) {
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
- prot = PAGE_BITS;
+ prot = PAGE_RWX;
tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
TARGET_PAGE_SIZE);
return true;