@@ -482,6 +482,31 @@ static abi_long mmap_end(abi_ulong start, abi_ulong last,
return start;
}
+/*
+ * Special case host page size == target page size,
+ * where there are no edge conditions.
+ */
+static abi_long mmap_h_eq_g(abi_ulong start, abi_ulong len,
+ int host_prot, int flags, int page_flags,
+ int fd, off_t offset)
+{
+ void *p, *want_p = g2h_untagged(start);
+ abi_ulong last;
+
+ p = mmap(want_p, len, host_prot, flags, fd, offset);
+ if (p == MAP_FAILED) {
+ return -1;
+ }
+ if ((flags & MAP_FIXED_NOREPLACE) && p != want_p) {
+ errno = EEXIST;
+ return -1;
+ }
+
+ start = h2g(p);
+ last = start + len - 1;
+ return mmap_end(start, last, start, last, flags, page_flags);
+}
+
static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
int target_prot, int flags, int page_flags,
int fd, off_t offset)
@@ -490,6 +515,7 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
abi_ulong ret, last, real_start, real_last, retaddr, host_len;
abi_ulong passthrough_start = -1, passthrough_last = 0;
off_t host_offset;
+ int host_prot;
real_start = start & -host_page_size;
host_offset = offset & -host_page_size;
@@ -498,16 +524,33 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
* For reserved_va, we are in full control of the allocation.
* Find a suitible hole and convert to MAP_FIXED.
*/
- if (reserved_va && !(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
- host_len = len + offset - host_offset;
- start = mmap_find_vma(real_start, host_len,
- MAX(host_page_size, TARGET_PAGE_SIZE));
- if (start == (abi_ulong)-1) {
- errno = ENOMEM;
- return -1;
+ if (reserved_va) {
+ if (flags & MAP_FIXED_NOREPLACE) {
+ /* Validate that the chosen range is empty. */
+ if (!page_check_range_empty(start, start + len - 1)) {
+ errno = EEXIST;
+ return -1;
+ }
+ flags = (flags & ~MAP_FIXED_NOREPLACE) | MAP_FIXED;
+ } else if (!(flags & MAP_FIXED)) {
+ size_t real_len = len + offset - host_offset;
+ abi_ulong align = MAX(host_page_size, TARGET_PAGE_SIZE);
+
+ start = mmap_find_vma(real_start, real_len, align);
+ if (start == (abi_ulong)-1) {
+ errno = ENOMEM;
+ return -1;
+ }
+ start += offset - host_offset;
+ flags |= MAP_FIXED;
}
- start += offset - host_offset;
- flags |= MAP_FIXED;
+ }
+
+ host_prot = target_to_host_prot(target_prot);
+
+ if (host_page_size == TARGET_PAGE_SIZE) {
+ return mmap_h_eq_g(start, len, host_prot, flags,
+ page_flags, fd, offset);
}
/*
@@ -543,12 +586,10 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
if (!(flags & (MAP_FIXED | MAP_FIXED_NOREPLACE))) {
uintptr_t host_start;
- int host_prot;
void *p;
host_len = len + offset - host_offset;
host_len = ROUND_UP(host_len, host_page_size);
- host_prot = target_to_host_prot(target_prot);
/* Note: we prefer to control the mapping address. */
p = mmap(g2h_untagged(start), host_len, host_prot,
@@ -671,8 +712,7 @@ static abi_long target_mmap__locked(abi_ulong start, abi_ulong len,
len1 = real_last - real_start + 1;
want_p = g2h_untagged(real_start);
- p = mmap(want_p, len1, target_to_host_prot(target_prot),
- flags, fd, offset1);
+ p = mmap(want_p, len1, host_prot, flags, fd, offset1);
if (p != want_p) {
if (p != MAP_FAILED) {
munmap(p, len1);
Move the MAP_FIXED_NOREPLACE check for reserved_va earlier. Move the computation of host_prot earlier. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> --- linux-user/mmap.c | 66 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 13 deletions(-)