Message ID | 20201214140314.18544-20-richard.henderson@linaro.org |
---|---|
State | Superseded |
Series | Mirror map JIT memory for TCG |
Guarding MAP_JIT with

if (!splitwx) {
    flags |= MAP_JIT;
}

is better because MAP_JIT tells the kernel that we want a RWX mapping
which is not the case. On iOS, special entitlements are needed for
MAP_JIT.

-j

On Mon, Dec 14, 2020 at 6:03 AM Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Cribbed from code posted by Joelle van Dyne <j@getutm.app>,
> and rearranged to a cleaner structure.
>
> Reviewed-by: Joelle van Dyne <j@getutm.app>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/translate-all.c | 65 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
>
> diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
> index 1931e65365..17df6c94fa 100644
> --- a/accel/tcg/translate-all.c
> +++ b/accel/tcg/translate-all.c
> @@ -1166,9 +1166,71 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
>  }
>  #endif /* CONFIG_POSIX */
>
> +#ifdef CONFIG_DARWIN
> +#include <mach/mach.h>
> +
> +extern kern_return_t mach_vm_remap(vm_map_t target_task,
> +                                   mach_vm_address_t *target_address,
> +                                   mach_vm_size_t size,
> +                                   mach_vm_offset_t mask,
> +                                   int flags,
> +                                   vm_map_t src_task,
> +                                   mach_vm_address_t src_address,
> +                                   boolean_t copy,
> +                                   vm_prot_t *cur_protection,
> +                                   vm_prot_t *max_protection,
> +                                   vm_inherit_t inheritance);
> +
> +static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
> +{
> +    kern_return_t ret;
> +    mach_vm_address_t buf_rw, buf_rx;
> +    vm_prot_t cur_prot, max_prot;
> +
> +    /* Map the read-write portion via normal anon memory. */
> +    if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
> +                                    MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
> +        return false;
> +    }
> +
> +    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
> +    buf_rx = 0;
> +    ret = mach_vm_remap(mach_task_self(),
> +                        &buf_rx,
> +                        size,
> +                        0,
> +                        VM_FLAGS_ANYWHERE,
> +                        mach_task_self(),
> +                        buf_rw,
> +                        false,
> +                        &cur_prot,
> +                        &max_prot,
> +                        VM_INHERIT_NONE);
> +    if (ret != KERN_SUCCESS) {
> +        /* TODO: Convert "ret" to a human readable error message. */
> +        error_setg(errp, "vm_remap for jit splitwx failed");
> +        munmap((void *)buf_rw, size);
> +        return false;
> +    }
> +
> +    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
> +        error_setg_errno(errp, errno, "mprotect for jit splitwx");
> +        munmap((void *)buf_rx, size);
> +        munmap((void *)buf_rw, size);
> +        return false;
> +    }
> +
> +    tcg_splitwx_diff = buf_rx - buf_rw;
> +    return true;
> +}
> +#endif /* CONFIG_DARWIN */
> +
>  static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
>  {
>      if (TCG_TARGET_SUPPORT_MIRROR) {
> +#ifdef CONFIG_DARWIN
> +        return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
> +#endif
>  #ifdef CONFIG_POSIX
>          return alloc_code_gen_buffer_splitwx_memfd(size, errp);
>  #endif
> @@ -1201,6 +1263,9 @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
>  #ifdef CONFIG_TCG_INTERPRETER
>      /* The tcg interpreter does not need execute permission. */
>      prot = PROT_READ | PROT_WRITE;
> +#elif defined(CONFIG_DARWIN)
> +    /* Applicable to both iOS and macOS (Apple Silicon). */
> +    flags |= MAP_JIT;
>  #endif
>
>      return alloc_code_gen_buffer_anon(size, prot, flags, errp);
> --
> 2.25.1
>
On 1/4/21 8:02 PM, Joelle van Dyne wrote:
> Guarding MAP_JIT with
>
> if (!splitwx) {
>     flags |= MAP_JIT;
> }
>
> is better because MAP_JIT tells the kernel that we want a RWX mapping
> which is not the case. On iOS, special entitlements are needed for
> MAP_JIT.

Thanks, fixed.

r~
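As a minimal sketch of the guarded flag selection being discussed: the standalone helper name and the __APPLE__ check below are illustrative only, not the code that was merged, but they show the idea that MAP_JIT is requested only when the split-wx dual mapping is not in use.

```c
#include <stdbool.h>
#include <sys/mman.h>

/*
 * Sketch only: pick mmap() flags for a JIT code buffer.
 * MAP_JIT asks Darwin for a mapping that may become both writable and
 * executable, which needs a special entitlement on iOS; the split-wx
 * path never needs that, so the flag is added only when !splitwx.
 */
static int code_gen_mmap_flags(bool splitwx)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__APPLE__) && defined(MAP_JIT)
    if (!splitwx) {
        flags |= MAP_JIT;
    }
#endif
    return flags;
}
```

Presumably the fix applies the same guard inline in alloc_code_gen_buffer(), where the final hunk of the patch below adds MAP_JIT.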
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 1931e65365..17df6c94fa 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1166,9 +1166,71 @@ static bool alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
 }
 #endif /* CONFIG_POSIX */
 
+#ifdef CONFIG_DARWIN
+#include <mach/mach.h>
+
+extern kern_return_t mach_vm_remap(vm_map_t target_task,
+                                   mach_vm_address_t *target_address,
+                                   mach_vm_size_t size,
+                                   mach_vm_offset_t mask,
+                                   int flags,
+                                   vm_map_t src_task,
+                                   mach_vm_address_t src_address,
+                                   boolean_t copy,
+                                   vm_prot_t *cur_protection,
+                                   vm_prot_t *max_protection,
+                                   vm_inherit_t inheritance);
+
+static bool alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
+{
+    kern_return_t ret;
+    mach_vm_address_t buf_rw, buf_rx;
+    vm_prot_t cur_prot, max_prot;
+
+    /* Map the read-write portion via normal anon memory. */
+    if (!alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
+                                    MAP_PRIVATE | MAP_ANONYMOUS, errp)) {
+        return false;
+    }
+
+    buf_rw = (mach_vm_address_t)tcg_ctx->code_gen_buffer;
+    buf_rx = 0;
+    ret = mach_vm_remap(mach_task_self(),
+                        &buf_rx,
+                        size,
+                        0,
+                        VM_FLAGS_ANYWHERE,
+                        mach_task_self(),
+                        buf_rw,
+                        false,
+                        &cur_prot,
+                        &max_prot,
+                        VM_INHERIT_NONE);
+    if (ret != KERN_SUCCESS) {
+        /* TODO: Convert "ret" to a human readable error message. */
+        error_setg(errp, "vm_remap for jit splitwx failed");
+        munmap((void *)buf_rw, size);
+        return false;
+    }
+
+    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
+        error_setg_errno(errp, errno, "mprotect for jit splitwx");
+        munmap((void *)buf_rx, size);
+        munmap((void *)buf_rw, size);
+        return false;
+    }
+
+    tcg_splitwx_diff = buf_rx - buf_rw;
+    return true;
+}
+#endif /* CONFIG_DARWIN */
+
 static bool alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
 {
     if (TCG_TARGET_SUPPORT_MIRROR) {
+#ifdef CONFIG_DARWIN
+        return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
+#endif
 #ifdef CONFIG_POSIX
         return alloc_code_gen_buffer_splitwx_memfd(size, errp);
 #endif
@@ -1201,6 +1263,9 @@ static bool alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
 #ifdef CONFIG_TCG_INTERPRETER
     /* The tcg interpreter does not need execute permission. */
     prot = PROT_READ | PROT_WRITE;
+#elif defined(CONFIG_DARWIN)
+    /* Applicable to both iOS and macOS (Apple Silicon). */
+    flags |= MAP_JIT;
 #endif
 
     return alloc_code_gen_buffer_anon(size, prot, flags, errp);
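For anyone who wants to poke at the dual-mapping trick outside of QEMU, here is a self-contained sketch (macOS only; the file name, buffer size and output format are arbitrary choices, not taken from the patch). It follows the same mmap -> mach_vm_remap -> mprotect sequence as the hunk above, but only checks that a byte written through the RW view is visible through the RX view rather than executing anything. Its error path uses mach_error_string(), one possible answer to the "human readable error message" TODO.

```c
/* remap_demo.c: cc remap_demo.c -o remap_demo   (macOS) */
#include <mach/mach.h>
#include <mach/mach_vm.h>     /* mach_vm_remap(); the patch declares it by hand instead */
#include <mach/mach_error.h>  /* mach_error_string() */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    const size_t size = 64 * 1024;   /* arbitrary, page-aligned demo size */

    /* 1. Writable half: ordinary anonymous memory, no MAP_JIT needed. */
    void *buf_rw = mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf_rw == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* 2. Mirror the same pages at a second address chosen by the kernel. */
    mach_vm_address_t buf_rx = 0;
    vm_prot_t cur_prot, max_prot;
    kern_return_t kr = mach_vm_remap(mach_task_self(), &buf_rx, size, 0,
                                     VM_FLAGS_ANYWHERE, mach_task_self(),
                                     (mach_vm_address_t)buf_rw, FALSE,
                                     &cur_prot, &max_prot, VM_INHERIT_NONE);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "mach_vm_remap: %s\n", mach_error_string(kr));
        return 1;
    }

    /* 3. The mirror becomes the read-execute view. */
    if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
        perror("mprotect");
        return 1;
    }

    /* 4. A write through the RW view is visible through the RX view. */
    ((unsigned char *)buf_rw)[0] = 0xc3;
    printf("rw view %p, rx view %p, byte seen via rx view: 0x%02x\n",
           buf_rw, (void *)buf_rx, *(unsigned char *)buf_rx);
    return 0;
}
```

The tcg_splitwx_diff assignment in the hunk records exactly this distance between the two views, so generated code can be written through one address and fetched through the other.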